seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
324822817
|
import os
import numpy as np
import pytest
from sklearn.base import clone
from tensorflow.python.keras.testing_utils import get_test_data
from scikeras.wrappers import KerasClassifier, KerasRegressor
from .mlp_models import dynamic_classifier, dynamic_regressor
# Defaults
INPUT_DIM = 5
HIDDEN_DIM = 5
TRAIN_SAMPLES = 10
TEST_SAMPLES = 5
NUM_CLASSES = 2
BATCH_SIZE = 5
EPOCHS = 1
class TestRandomState:
@pytest.mark.parametrize(
"random_state", [0, 123, np.random.RandomState(0)],
)
@pytest.mark.parametrize(
"estimator",
[
KerasRegressor(
model=dynamic_regressor,
loss=KerasRegressor.r_squared,
model__hidden_layer_sizes=(100,),
),
KerasClassifier(model=dynamic_classifier, model__hidden_layer_sizes=(100,)),
],
)
def test_random_states(self, random_state, estimator):
"""Tests that the random_state parameter correctly
        engages deterministic training and prediction.
"""
(X, y), (_, _) = get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES,
)
# With seed
estimator.set_params(random_state=random_state)
estimator.fit(X, y)
y1 = estimator.predict(X)
estimator.fit(X, y)
y2 = estimator.predict(X)
assert np.allclose(y1, y2)
if isinstance(estimator, KerasRegressor):
# Without seed, regressors should NOT
# give the same results
# Classifiers _may_ give the same classes
estimator.set_params(random_state=None)
estimator.fit(X, y)
y1 = estimator.predict(X)
estimator.fit(X, y)
y2 = estimator.predict(X)
assert not np.allclose(y1, y2)
@pytest.mark.parametrize(
"estimator",
[
KerasRegressor(
model=dynamic_regressor,
loss=KerasRegressor.r_squared,
model__hidden_layer_sizes=(100,),
),
KerasClassifier(model=dynamic_classifier, model__hidden_layer_sizes=(100,)),
],
)
@pytest.mark.parametrize("pyhash", [None, "0", "1"])
@pytest.mark.parametrize("gpu", [None, "0", "1"])
def test_random_states_env_vars(self, estimator, pyhash, gpu):
"""Tests that the random state context management correctly
handles TF related env variables.
"""
(X, y), (_, _) = get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES,
)
if "random_state" in estimator.get_params():
estimator.set_params(random_state=None)
estimator1 = clone(estimator)
estimator2 = clone(estimator)
if "random_state" in estimator1.get_params():
estimator1.set_params(random_state=0)
if "random_state" in estimator2.get_params():
estimator2.set_params(random_state=0)
if gpu is not None:
os.environ["TF_DETERMINISTIC_OPS"] = gpu
else:
if os.environ.get("TF_DETERMINISTIC_OPS"):
os.environ.pop("TF_DETERMINISTIC_OPS")
if pyhash is not None:
os.environ["PYTHONHASHSEED"] = pyhash
else:
if os.environ.get("PYTHONHASHSEED"):
os.environ.pop("PYTHONHASHSEED")
estimator1.fit(X, y)
estimator2.fit(X, y)
if gpu is not None:
assert os.environ["TF_DETERMINISTIC_OPS"] == gpu
else:
assert "TF_DETERMINISTIC_OPS" not in os.environ
if pyhash is not None:
assert os.environ["PYTHONHASHSEED"] == pyhash
else:
assert "PYTHONHASHSEED" not in os.environ
y1 = estimator1.predict(X)
y2 = estimator2.predict(X)
assert np.allclose(y1, y2)
if gpu is not None:
assert os.environ["TF_DETERMINISTIC_OPS"] == gpu
else:
assert "TF_DETERMINISTIC_OPS" not in os.environ
if pyhash is not None:
assert os.environ["PYTHONHASHSEED"] == pyhash
else:
assert "PYTHONHASHSEED" not in os.environ
def test_sample_weights_fit():
"""Checks that the `sample_weight` parameter when passed to `fit`
has the intended effect.
"""
# build estimator
estimator = KerasClassifier(
model=dynamic_classifier,
model__hidden_layer_sizes=(100,),
epochs=10,
random_state=0,
)
estimator1 = clone(estimator)
estimator2 = clone(estimator)
    # we create 10,000 points
X = np.array([1] * 10000).reshape(-1, 1)
y = [1] * 5000 + [-1] * 5000
# heavily weight towards y=1 points
sw_first_class = [0.8] * 5000 + [0.2] * 5000
# train estimator 1 with weights
with pytest.warns(UserWarning, match="Setting the random state"):
estimator1.fit(X, y, sample_weight=sw_first_class)
# train estimator 2 without weights
with pytest.warns(UserWarning, match="Setting the random state"):
estimator2.fit(X, y)
# estimator1 should tilt towards y=1
# estimator2 should predict about equally
average_diff_pred_prob_1 = np.average(np.diff(estimator1.predict_proba(X), axis=1))
average_diff_pred_prob_2 = np.average(np.diff(estimator2.predict_proba(X), axis=1))
assert average_diff_pred_prob_2 < average_diff_pred_prob_1
# equal weighting
sw_equal = [0.5] * 5000 + [0.5] * 5000
# train estimator 1 with weights
estimator1.fit(X, y, sample_weight=sw_equal)
# train estimator 2 without weights
estimator2.fit(X, y)
# both estimators should have about the same predictions
np.testing.assert_allclose(
actual=estimator1.predict_proba(X), desired=estimator2.predict_proba(X)
)
def test_sample_weights_score():
"""Checks that the `sample_weight` parameter when passed to
`score` has the intended effect.
"""
# build estimator
estimator = KerasRegressor(
model=dynamic_regressor,
model__hidden_layer_sizes=(100,),
epochs=10,
random_state=0,
)
estimator1 = clone(estimator)
estimator2 = clone(estimator)
    # we create 10,000 points
X = np.array([1] * 10000).reshape(-1, 1)
y = [1] * 5000 + [-1] * 5000
# train
estimator1.fit(X, y)
estimator2.fit(X, y)
# heavily weight towards y=1 points
bad_sw = [0.999] * 5000 + [0.001] * 5000
    # score estimator1 with the skewed weights; estimator2 (unweighted) should
    # score higher since the bad weights "unbalance" estimator1's score
score1 = estimator1.score(X, y, sample_weight=bad_sw)
score2 = estimator2.score(X, y)
assert score2 > score1
def test_build_fn_default_params():
"""Tests that default arguments arguments of
`build_fn` are registered as hyperparameters.
"""
est = KerasClassifier(model=dynamic_classifier, model__hidden_layer_sizes=(100,))
params = est.get_params()
# (100, ) is the default for dynamic_classifier
assert params["model__hidden_layer_sizes"] == (100,)
est = KerasClassifier(model=dynamic_classifier, model__hidden_layer_sizes=(200,))
params = est.get_params()
assert params["model__hidden_layer_sizes"] == (200,)
| null |
tests/test_parameters.py
|
test_parameters.py
|
py
| 7,366 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tensorflow.python.keras.testing_utils.get_test_data",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "scikeras.wrappers.KerasRegressor",
"line_number": 58,
"usage_type": "argument"
},
{
"api_name": "numpy.allclose",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.RandomState",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "scikeras.wrappers.KerasRegressor",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "mlp_models.dynamic_regressor",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "scikeras.wrappers.KerasRegressor.r_squared",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "scikeras.wrappers.KerasRegressor",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "scikeras.wrappers.KerasClassifier",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "mlp_models.dynamic_classifier",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "tensorflow.python.keras.testing_utils.get_test_data",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "sklearn.base.clone",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "sklearn.base.clone",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "os.environ.pop",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "os.environ.pop",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "numpy.allclose",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "scikeras.wrappers.KerasRegressor",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "mlp_models.dynamic_regressor",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "scikeras.wrappers.KerasRegressor.r_squared",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "scikeras.wrappers.KerasRegressor",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "scikeras.wrappers.KerasClassifier",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "mlp_models.dynamic_classifier",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "scikeras.wrappers.KerasClassifier",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "mlp_models.dynamic_classifier",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "sklearn.base.clone",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "sklearn.base.clone",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "pytest.warns",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "pytest.warns",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "numpy.average",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "numpy.average",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "numpy.testing.assert_allclose",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "numpy.testing",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "scikeras.wrappers.KerasRegressor",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "mlp_models.dynamic_regressor",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "sklearn.base.clone",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "sklearn.base.clone",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "scikeras.wrappers.KerasClassifier",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "mlp_models.dynamic_classifier",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "scikeras.wrappers.KerasClassifier",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "mlp_models.dynamic_classifier",
"line_number": 219,
"usage_type": "name"
}
] |
107846496
|
"""
Bugs tests
- Columns aren't case-insensitive.
For debug information use:
pytest-3 --log-cli-level debug
"""
import os
import sys
import logging
# Look for the 'utils' module from where the script is running
prev_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if prev_dir not in sys.path:
sys.path.insert(0, prev_dir)
# Utils import
from utils import context # noqa: E402
def test_column_sensitive(test_dir):
""" Test if the COLUMN_ORDER section can contain columns in lowercase """
prj = 'links'
ext = 'csv'
ctx = context.TestContext(test_dir, 'ColumnSensitive', prj, ext, 'column_sensitive')
ctx.run()
out = prj + '.' + ext
heads = ctx.load_csv_header(out)
logging.debug(heads)
assert len(heads) == 4
ctx.clean_up()
def test_variants_issue_SG136_default(test_dir):
prj = 'kibom-variant_2'
ext = 'csv'
ctx = context.TestContext(test_dir, 'test_variants_issue_SG136_default', prj, ext)
extra = ['-r', 'default']
ctx.run(no_config_file=True, extra=extra)
out = prj + '_bom_A_(default).' + ext
rows, components = ctx.load_csv(out)
assert len(rows) == 1
assert len(components) == 2
assert 'R1' in components
assert 'R2' in components
assert 'C1' not in components
assert 'C2' not in components
ctx.clean_up()
def test_variants_issue_SG136_production(test_dir):
prj = 'kibom-variant_2'
ext = 'csv'
ctx = context.TestContext(test_dir, 'test_variants_issue_SG136_production', prj, ext, 'production')
ctx.run()
# ctx.run(no_config_file=True, extra=['-r', 'production'])
out = prj + '_bom_A_(production).' + ext
rows, components = ctx.load_csv(out)
assert len(rows) == 2
assert len(components) == 3
assert 'R1' in components
assert 'R2' in components
assert 'C1' not in components
assert 'C2' in components
ctx.clean_up()
def test_variants_issue_SG136_test(test_dir):
prj = 'kibom-variant_2'
ext = 'csv'
ctx = context.TestContext(test_dir, 'test_variants_issue_SG136_test', prj, ext)
extra = ['-r', 'test']
ctx.run(no_config_file=True, extra=extra)
out = prj + '_bom_A_(test).' + ext
rows, components = ctx.load_csv(out)
assert len(rows) == 2
assert len(components) == 3
assert 'R1' in components
assert 'R2' not in components
assert 'C1' in components
assert 'C2' in components
ctx.clean_up()
| null |
tests/test_bom/test_bugs.py
|
test_bugs.py
|
py
| 2,433 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sys.path.insert",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "utils.context.TestContext",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "utils.context",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "utils.context.TestContext",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "utils.context",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "utils.context.TestContext",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "utils.context",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "utils.context.TestContext",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "utils.context",
"line_number": 72,
"usage_type": "name"
}
] |
397308189
|
import glob
import random
import os
import numpy as np
import torch
from torch.utils.data import Dataset
from PIL import Image
import torchvision.transforms as transforms
# Normalization parameters for pre-trained PyTorch models
mean = np.array([0.5, 0.5, 0.5])
std = np.array([0.5, 0.5, 0.5])
def denormalize(tensors):
""" Denormalizes image tensors using mean and std """
for c in range(3):
tensors[:, c].mul_(std[c]).add_(mean[c])
return torch.clamp(tensors, 0, 255)
class ImageDataset(Dataset):
def __init__(self, hr_root, lr_root, hr_shape):
hr_height, hr_width = hr_shape
# Transforms for low resolution images and high resolution images
self.transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(mean, std),
]
)
self.transform_hr = transforms.Compose(
[transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize(mean, std),
]
)
self.hr_files = sorted(glob.glob(hr_root + "/*.*"))
self.lr_files = sorted(glob.glob(lr_root + "/*.*"))
def __getitem__(self, index):
img_hr = Image.open(self.hr_files[index])
img_lr = Image.open(self.lr_files[index])
img_lr = self.transform(img_lr)
img_hr = self.transform_hr(img_hr)
return {"lr": img_lr, "hr": img_hr}
def __len__(self):
return len(self.hr_files)
| null |
src/dataset/datasets.py
|
datasets.py
|
py
| 1,504 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.array",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "glob.glob",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 46,
"usage_type": "name"
}
] |
116200954
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 30 20:33:58 2019
@author: Bohan
"""
import requests,json,hashlib,os,time
from pathlib import Path
from time import time, perf_counter
from fake_useragent import UserAgent
from xml.etree import ElementTree
# Import requests.
session = requests.session()
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36'
}
# Add request headers to mimic a normal browser visit and avoid anti-crawler blocking.
data={}
data['login']='[email protected]' # Replace this with your ACCOUNT NAME for phytozome.jgi.doe.gov
data['password']='XXXXXXXX' # Replace this with your PASSWORD for phytozome.jgi.doe.gov
def sign_in():
global cookies_dict #define cookies_dict to store dict form cookie
global cookies_str
cookies_dict={}
    url = 'https://signon-old.jgi.doe.gov/signon/create' # Assign the sign-in URL
session.post(url, headers=headers, data=data)
cookies_dict = requests.utils.dict_from_cookiejar(session.cookies)
cookies_str = json.dumps(cookies_dict)
f = open('cookies.txt', 'w')
f.write(cookies_str)
f.close()
# The 7 lines above sign in to Phytozome and save the cookies
def cookies_read():
cookies_txt = open('cookies.txt', 'r')
cookies_dict = json.loads(cookies_txt.read())
cookies = requests.utils.cookiejar_from_dict(cookies_dict)
return (cookies)
# The 4 lines above read the local cookies
def md5sum(filepath):
fd = open(filepath,"rb")
fcont = fd.read()
fd.close()
fmd5 = str(hashlib.md5(fcont).hexdigest())
return fmd5
# md5sum returns the MD5 checksum of the given file
def createpath(file_path):
try:
if not os.path.exists(file_path):
print ('文件夹',file_path,'不存在,重新建立') #print ('folder',file_path,'is not exist, created it')
#os.mkdir(file_path)
os.makedirs(file_path)
except IOError as e:
print ('文件操作失败',e) #print ('IOError',e)
except Exception as e:
print ('错误 :',e) #print ('Error',e)
# createpath checks whether the target directory exists and creates it if not
def getxml():
global fileurl
fileurl=[]
PHYTOALL='Phytozome'
xmldata=session.get('https://genome.jgi.doe.gov/portal/ext-api/downloads/get-directory?organism=Phytozome&organizedByFileType=false')
    # Query the API with the specified version name
with open('./'+PHYTOALL+'.xml','wb') as xf:
xf.write(xmldata.content)
    # Download the official XML file for the selected version
    xmlDoc = ElementTree.parse('./'+PHYTOALL+'.xml') # Parse the XML file with ElementTree and name it xmlDoc
    folderl1 = xmlDoc.findall('folder') # Use findall to get the list of first-level folders
print('目前数据库中有以下版本:\n') #print('The database have these Versions:\n')
number=1
    for folderl1x in folderl1: # Iterate over the first-level folder list
print(str(number)+'. '+folderl1x.attrib['name'])
number=number+1
    pick=input('Please choose which version you want, input a number:\nFor example: 2. After your input, press Enter\n')
folderl1name =folderl1[int(pick)-1]
    folderl2 = folderl1name.findall('folder') # Use findall to get the list of second-level folders
folderl2f = folderl1name.findall('file')
for folderl2fname in folderl2f:
folderpathl2 = "./"+ str(folderl1name.get('name'))+ "/"
fileurl.append(folderpathl2)
fileurl.append(folderl2fname.get('filename'))
fileurl.append('https://genome.jgi.doe.gov'+folderl2fname.get('url'))
fileurl.append(folderl2fname.get('md5'))
    for folderl2name in folderl2: # Iterate over the second-level folder list
        folderl3 = folderl2name.findall('folder') # Use findall to get the list of third-level folders
folderl3f = folderl2name.findall('file')
for folderl3fname in folderl3f:
folderpathl3 = "./"+ str(folderl1name.get('name'))+"/"+ str(folderl2name.get('name')) + "/"
fileurl.append(folderpathl3)
fileurl.append(folderl3fname.get('filename'))
fileurl.append('https://genome.jgi.doe.gov'+folderl3fname.get('url'))
fileurl.append(folderl3fname.get('md5'))
        for folderl3name in folderl3: # Iterate over the third-level folder list
            folderl4 = folderl3name.findall('folder') # Use findall to get the list of fourth-level folders
folderl4f = folderl3name.findall('file')
for folderl4fname in folderl4f:
folderpathl4 = "./"+ str(folderl1name.get('name'))+"/"+ str(folderl2name.get('name')) + "/" +str(folderl3name.get('name'))+ "/"
fileurl.append(folderpathl4)
fileurl.append(folderl4fname.get('filename'))
fileurl.append('https://genome.jgi.doe.gov'+folderl4fname.get('url'))
fileurl.append(folderl4fname.get('md5'))
            for folderl4name in folderl4: # Iterate over the fourth-level folder list
                folderl5 = folderl4name.findall('folder') # Use findall to get the list of fifth-level folders
folderl5f = folderl4name.findall('file')
for folderl5fname in folderl5f:
folderpathl5 = "./"+ str(folderl1name.get('name')) + "/" + str(folderl2name.get('name')) + "/" + str(folderl3name.get('name')) + "/"+ str(folderl4name.get('name')) + "/"
fileurl.append(folderpathl5)
fileurl.append(folderl5fname.get('filename'))
fileurl.append('https://genome.jgi.doe.gov'+folderl5fname.get('url'))
fileurl.append(folderl5fname.get('md5'))
file = open("./genome.links","w")
file.write(str(fileurl))
file.close()
return fileurl
# Parse the official XML and store each file's path, name, URL and MD5 in genome.links, as a flat list cycling through 4 values: 1 path, 2 file name, 3 URL, 4 MD5
def gettasklist():
global tasklist
tasklist={}
for i in range(int(len(fileurl)/4)):
onefilelist=[]
onefilelist.append(fileurl[i*4+2])
onefilelist.append(fileurl[i*4]+fileurl[i*4+1])
onefilelist.append(fileurl[i*4+3])
tasklist[i]=onefilelist
return tasklist
# Merge path and file name to build tasklist; each entry is: 1 URL, 2 path+file name, 3 MD5
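# A minimal sketch of the two structures built above (hypothetical values, for illustration only):
# fileurl  = ['./PhytozomeV12/', 'Athaliana.fa.gz', 'https://genome.jgi.doe.gov/...', 'd41d8cd98f00b204e9800998ecf8427e', ...]
# tasklist = {0: ['https://genome.jgi.doe.gov/...', './PhytozomeV12/Athaliana.fa.gz', 'd41d8cd98f00b204e9800998ecf8427e']}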
def download_file_from_url(dl_url, file_name, md5, headers):
file_path = Path(__file__).parent.joinpath(file_name)
if file_path.exists():
dl_size = file_path.stat().st_size #if file exits, get downloaded file size
else:
dl_size = 0
headers={
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36',
'Cookie': cookies_dict} #include cookie into request headers
headers['Range'] = f'bytes={dl_size}-'
response = session.get(dl_url, stream=True) #use seesion get content via stream
print('\n\n' + '*' * 30 + '下载信息' + '*' * 30) #print('\n\n' + '*' * 30 + 'Downloading Information' + '*' * 30)
try:
total_size = int(response.headers['content-length']) #if server respond with content length, that could be continue download
print(
f'\n\n文件名称:{file_name}\t\t已下载文件大小:{dl_size / 1024 / 1024:.2f}M\t\t文件总大小:{total_size/1024/1024:.2f}M\n\n该文件支持断点续传\n') #print(f'\n\nCurrent downloading:{file_name}\t\tDownloaded:{dl_size / 1024 / 1024:.2f}M\t\tThis file supports continue downloading, downloading......\n')
start = perf_counter()
data_count = 0
count_tmp = 0
start_time = time()
with open(file_path, 'ab') as fp: #if server respond with content length, that could be continue download, writ file with ab model, append
for chunk in response.iter_content(chunk_size=512):
data_count += len(chunk)
now_pross = (data_count / total_size) * 100
mid_time = time()
if mid_time - start_time > 0.1:
speed = (data_count - count_tmp) / 1024 / (mid_time - start_time)
start_time = mid_time
count_tmp = data_count
print(
f"\rDownloading.........{now_pross:.2f}%\t{data_count//1024}Kb/{total_size//1024}Kb\t当前下载速度:{speed:.2f}Kb/s", end='') #f'\n\nDownloaded!Total used:{diff:.2f} seconds, Average downloading speed:{speed:.2f}Kb/s!
fp.write(chunk)
end = perf_counter()
diff = end - start
speed = total_size/1024/diff
print(
f'\n\n下载完成!耗时:{diff:.2f}秒, 平均下载速度:{speed:.2f}Kb/s!\n文件路径:{file_path}\n')
except KeyError: #if server respond with no content length, that means you should writ file with wb model, rewrite
print(f'\n\n当前文件名称:{file_name}\t\t已下载文件大小:{dl_size / 1024 / 1024:.2f}M\t\t该文件服务器不支持断点续传,重新开始下载\n') #print(f'\n\nCurrent downloading:{file_name}\t\tDownloaded:{dl_size / 1024 / 1024:.2f}M\t\tThis file doesn't supports continue downloading,restart to download this file.\n')
start = perf_counter()
data_count = 0
count_tmp = 0
start_time = time()
with open(file_path, 'wb') as fp:
for chunk in response.iter_content(chunk_size=512):
data_count += len(chunk)
mid_time = time()
if mid_time - start_time > 0.1:
speed = (data_count - count_tmp) / 1024 / (mid_time - start_time)
start_time = mid_time
count_tmp = data_count
print(
f"\rDownloading.........\t{data_count//1024}Kb当前下载速度:{speed:.2f}Kb/s", end='') #f"\rDownloading.........\t{data_count//1024}KbCurrent downloading speed:{speed:.2f}Kb/s", end='')
fp.write(chunk)
end = perf_counter()
diff = end - start
speed = data_count/1024/diff
print(
f'\n\n下载完成!耗时:{diff:.2f}秒, 平均下载速度:{speed:.2f}Kb/s!\n文件路径:{file_path}\n') #f'\n\nDownloaded!Total used:{diff:.2f} seconds, Average downloading speed:{speed:.2f}Kb/s!\nFile Path:{file_path}\n')
fmd5=md5sum(file_name)
    if fmd5 == md5: # check integrity of file
print('文件校验成功!')
else:
print('文件校验失败')
def paralleldownload():
for j in range(int(len(tasklist))):
try:
if md5sum(tasklist[j][1]) != tasklist[j][2]:
download_file_from_url(tasklist[j][0],tasklist[j][1],tasklist[j][2],headers)
else:
print('第'+str(j+1)+'个文件已存在且与本地文件一致') #print('The No.'+str(j+1)+'file is already existing, and it don't need to be download again')
except FileNotFoundError as e:
print('共计'+str(int(len(tasklist)))+'个文件,'+'目前开始下载第'+str(j+1)+'个文件') #print('There are total'+str(int(len(tasklist)))+'files,'+'We are downloading the number:'+str(j+1))
download_file_from_url(tasklist[j][0],tasklist[j][1],tasklist[j][2],headers)
sign_in()
getxml() #GETXML
gettasklist() #GETtasklist
for i in range(int(len(fileurl)/4)):
    createpath(fileurl[i*4]) # Create every subdirectory under the root, based on the parsed XML
paralleldownload()
| null |
phytozomedownloaderV1.1(Chinese_English).py
|
phytozomedownloaderV1.1(Chinese_English).py
|
py
| 12,291 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.session",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.utils.dict_from_cookiejar",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "requests.utils",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "requests.utils.cookiejar_from_dict",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "requests.utils",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "hashlib.md5",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 193,
"usage_type": "call"
}
] |
403453705
|
import cv2
import numpy as np
from skimage import measure
def regiao_int(imagem,img,pix):
    # Binarize the image
ret, thresh = cv2.threshold(imagem, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Label the connected regions of the array (image)
labels = measure.label(thresh, neighbors=4, background=255)
#print(labels)
    # Create an array containing only zeros
mask = np.zeros(thresh.shape, dtype="uint8")
    # Loop over the connected components found in the image
for label in np.unique(labels):
        # If this is the background label, skip it
if label == 0: continue
        # Otherwise, build the mask for this label
labelMask = np.zeros(thresh.shape, dtype="uint8")
labelMask[labels == label] = 255
numPixels = cv2.countNonZero(labelMask)
        # Check the number of pixels of each component
if numPixels > pix[0] and numPixels < pix[1]:
mask = cv2.add(mask, labelMask)
    # Mask used to crop the cattle out of the input image
ret, thresh1 = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
res = cv2.bitwise_and(img, img, mask=thresh1)
masc = res.copy()
    # Remove luminosity noise
for i in range(0, res.shape[0]):
for j in range(0, res.shape[1]):
(b, g, r) = res[i, j]
if (r > 110 and g > 110 and b > 110):
masc[i, j] = (0, 0, 0)
masc = cv2.cvtColor(masc, cv2.COLOR_BGR2GRAY)
ret, masc = cv2.threshold(masc, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return masc
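# Illustrative usage sketch (file name and pixel bounds below are hypothetical):
# gray = cv2.imread('frame.png', cv2.IMREAD_GRAYSCALE)
# bgr = cv2.imread('frame.png')
# mask = regiao_int(gray, bgr, pix=(500, 5000))  # keep components with 500-5000 pixels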
| null |
proj_mosca/mosca/m_contornos.py
|
m_contornos.py
|
py
| 1,608 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.threshold",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cv2.THRESH_OTSU",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "skimage.measure.label",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "skimage.measure",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.countNonZero",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.add",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "cv2.THRESH_OTSU",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "cv2.bitwise_and",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "cv2.THRESH_OTSU",
"line_number": 47,
"usage_type": "attribute"
}
] |
38741978
|
import os
import io
from straceParserLib.StraceParser import StraceParser
class Stana:
def __init__(self, *args, **kwargs):
self.__enabledPlugins = {}
self.__file = None
self.__options = None
pass
def enablePlugin(self, pluginName, pluginOptions=None):
"""Enable plugin named pluginName
Optionally also set plugin options through the pluginOptions parameter."""
try:
plugin = __import__("statPlugins."+pluginName, globals(), locals(), [pluginName])
except Exception as e:
raise Exception("plugin {} couldn't be loaded ".format(pluginName))
pluginObj = getattr(plugin, pluginName)()
self.__enabledPlugins[pluginName] = pluginObj
if not pluginOptions:
return
self.enablePluginOptions(pluginName, pluginOptions)
def enablePluginOptions(self, pluginName, Options):
"""Set Options of plugin pluginName"""
try:
pluginObj = self.__enabledPlugins[pluginName]
except KeyError:
raise Exception("Plugin {} not loaded".format(pluginName))
if not pluginObj.setOption(Options):
raise Exception("Plugin {} doesn't understand passed options".format(pluginName))
def setFile(self, fileName):
if os.path.isfile(fileName):
self.__file = fileName
else:
raise IOError("{} does not exist".format(fileName))
def parse(self, file=None):
"""Starts parsing strace input
the file to parse may be specified in the file parameter,
in which case it overrides any file set using the setFile() method."""
self.parser = StraceParser()
for plugin in self.__enabledPlugins:
hooks = self.__enabledPlugins[plugin].getSyscallHooks()
if hooks:
for syscall, func in hooks.items():
self.parser.registerSyscallHook(syscall, func)
hooks = self.__enabledPlugins[plugin].getRawSyscallHooks()
if hooks:
for syscall, func in hooks.items():
self.parser.registerRawSyscallHook(syscall, func)
with io.open(self.__file, 'r', 1) as f:
if not self.__options:
self.__options = self.parser.autoDetectFormat(f)
else:
pass # TODO : finish
for plugin in self.__enabledPlugins:
ret = self.__enabledPlugins[plugin].isOperational(self.__options)
if not ret:
raise Exception("required strace options not met for {}".format(plugin))
self.parser.startParse(f, self.__options)
def getResults(self, pluginName=None):
"""Returns object created by plugin
If pluginName is None, will return a dict of plugins
Otherwise only the returnObj of the specified plugin will be returned
Raises an exception if plugin is not enabled."""
if pluginName:
try:
return self.__enabledPlugins[pluginName].getOutputObject()
except KeyError:
raise Exception("{} not loaded, can't print".format(pluginName))
else:
ret = {}
for plugin in self.__enabledPlugins:
ret[plugin] = (self.__enabledPlugins[plugin].getOutputObject())
return ret
def printResults(self, pluginName=None):
"""Print plugin results to stdout
If pluginName is None, all enabled plugins will print one by one
Otherwise only the specified plugin will print.
Raises an exception if plugin is not enabled."""
if pluginName:
try:
self.__enabledPlugins[pluginName].printOutput()
except KeyError:
raise Exception("{} not loaded, can't print".format(pluginName))
else:
for plugin in self.__enabledPlugins:
self.__enabledPlugins[plugin].printOutput()
def __listPlugins(self):
return self.__enabledPlugins.keys()
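# Illustrative usage sketch (the plugin name and trace file below are hypothetical):
# stana = Stana()
# stana.enablePlugin("SomeStatPlugin")
# stana.setFile("trace.strace")
# stana.parse()
# stana.printResults()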
if __name__ == '__main__':
print ("running some tests...")
import doctest
doctest.testmod()
| null |
Stana.py
|
Stana.py
|
py
| 4,187 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.isfile",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "straceParserLib.StraceParser.StraceParser",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "doctest.testmod",
"line_number": 120,
"usage_type": "call"
}
] |
390781211
|
#!/usr/bin/env python
"""High-level planner that manages A* paths and environment obstacles."""
import rospy
from nav_msgs.msg import OccupancyGrid
from nav_msgs.msg import MapMetaData
import numpy as np
import matplotlib.pyplot as plt
import tf
from std_msgs.msg import Float32MultiArray
from astar import AStar, DetOccupancyGrid2D, StochOccupancyGrid2D
from nav_msgs.msg import Path
from geometry_msgs.msg import PoseStamped
class Navigator:
def __init__(self):
rospy.init_node('turtlebot_navigator', anonymous=True)
# Occupancy grid parameters
self.plan_resolution = 0.25
self.plan_horizon = 15
self.map_width = 0
self.map_height = 0
self.map_resolution = 0
self.map_origin = [0, 0]
self.map_probs = []
self.occupancy = None
self.nav_sp = None
self.short_path_len = 2 # don't go below 2 or else always head to path start
self.trans_listener = tf.TransformListener()
# Robot status
self.has_robot_location = False
self.robot_translation = (0, 0, 0)
self.robot_rotation = (0, 0, 0, 1)
# Mark completed waypoints off existing plan
self.prev_nav_sp = None
self.prev_astar = None
self.wp_complete_thresh = 0.35 # m, mark waypoint completed within thresh
self.path_abandon_thresh = 0.5 # m, replan if off path
rospy.Subscriber("map", OccupancyGrid, self.map_callback)
rospy.Subscriber("map_metadata", MapMetaData, self.map_md_callback)
rospy.Subscriber("/turtlebot_controller/nav_goal", Float32MultiArray, self.nav_sp_callback)
self.pose_sp_pub = rospy.Publisher('/turtlebot_controller/position_goal', Float32MultiArray, queue_size=10)
self.nav_path_pub = rospy.Publisher('/turtlebot_controller/path_goal', Path, queue_size=10)
def map_md_callback(self,msg):
self.map_width = msg.width
self.map_height = msg.height
self.map_resolution = msg.resolution
self.map_origin = (msg.origin.position.x,msg.origin.position.y)
def map_callback(self,msg):
self.map_probs = msg.data
if self.map_width>0 and self.map_height>0 and len(self.map_probs)>0:
self.occupancy = StochOccupancyGrid2D(self.map_resolution,
self.map_width,
self.map_height,
self.map_origin[0],
self.map_origin[1],
int(self.plan_resolution / self.map_resolution) * 2,
self.map_probs)
def nav_sp_callback(self,msg):
# Unpack commanded goal
self.nav_sp = (msg.data[0],msg.data[1],msg.data[2])
# Update our knowledge of robot (x,y,th)
self.robot_state()
# Goal has changed -> Replan
if self.nav_sp != self.prev_nav_sp:
self.send_pose_sp()
return
# Still moving toward previous goal
# Check if existing path still valid
if self.prev_astar:
self.prev_astar.occupancy = self.occupancy
if self.check_existing_path() and len(self.prev_astar.path) > self.short_path_len:
# Old path is valid
# Abandon this path if robot deviated too much -> Replan
if distance_to_line(self.prev_astar.path[0], self.prev_astar.path[1],
self.robot_translation[:2]) > self.path_abandon_thresh:
rospy.loginfo("Abandoning plan")
self.send_pose_sp()
return
# Otherwise, try to mark waypoints as completed
# Note: We loop through waypoints in order, so we won't throw out
# important future waypoints that e.g. curve around nearby wall
changed = False
while len(self.prev_astar.path) > self.short_path_len:
next_wp = self.prev_astar.path[1]
# Close enough to next waypoint, finished
if self.finished_waypoint(next_wp):
changed = True
# Note: assign completed waypoint as start; this keeps
# the path display in rviz looking normal
self.prev_astar.path[0] = self.prev_astar.path.pop(1)
# Next waypoint still valid; don't remove anything more
else:
break
# Publish modified previous path
if changed:
rospy.loginfo("Updating previous navigation plan")
else:
rospy.loginfo("Using existing navigation plan")
wp_x, wp_y, wp_th = self.next_waypoint(self.prev_astar)
self.publish_path(self.prev_astar, wp_x, wp_y, wp_th)
# Old path invalid -> Replan
else:
self.send_pose_sp()
return
# No existing plan -> Replan
else:
self.send_pose_sp()
return
def check_existing_path(self):
"""Check existing path still free of obstacles."""
for coords in self.prev_astar.path:
if not self.prev_astar.is_free(coords):
return False
return True
def finished_waypoint(self, x):
"""Check if robot within threshold of planned waypoint"""
dist = np.linalg.norm(np.array(self.robot_translation[:2]) - np.array(x))
return dist < self.wp_complete_thresh
def robot_state(self):
"""Queries robot state from map."""
try:
(self.robot_translation,
self.robot_rotation) = self.trans_listener.lookupTransform("/map","/base_footprint",rospy.Time(0))
self.has_robot_location = True
except (tf.LookupException, tf.ConnectivityException,
tf.ExtrapolationException):
self.robot_translation = (0, 0, 0)
self.robot_rotation = (0, 0, 0, 1)
self.has_robot_location = False
return
def send_pose_sp(self):
if self.occupancy and self.has_robot_location and self.nav_sp:
state_min = (-int(round(self.plan_horizon)), -int(round(self.plan_horizon)))
state_max = (int(round(self.plan_horizon)), int(round(self.plan_horizon)))
# Round initial, goal positions to grid resolution
x_init = round_pt_to_grid(self.robot_translation[:2], self.plan_resolution)
x_goal = round_pt_to_grid(self.nav_sp[:2], self.plan_resolution)
astar = AStar(state_min, state_max, x_init, x_goal, self.occupancy,
self.plan_resolution)
            # Add buffering around obstacles before planning
bufferRadius = 4
astar.bufferOccupancy(bufferRadius)
rospy.loginfo("Computing new navigation plan")
if astar.solve():
# If initial state == goal, path len == 1
# Handle case where A* solves, but no 2nd element -> don't send msg
if len(astar.path) < self.short_path_len:
rospy.loginfo("Path goal matches current state")
# Typical use case
else:
wp_x, wp_y, wp_th = self.next_waypoint(astar)
self.publish_path(astar, wp_x, wp_y, wp_th)
else:
rospy.logwarn("Could not find path")
def next_waypoint(self, astar):
"""Obtains next path waypoint, accounting for intermediate headings"""
# Next waypoint calculations
wp_x = astar.path[self.short_path_len - 1][0]
wp_y = astar.path[self.short_path_len - 1][1]
# Far from goal - do intermediate heading calcs
if len(astar.path) > self.short_path_len:
dx = wp_x - self.robot_translation[0]
dy = wp_y - self.robot_translation[1]
wp_th = np.arctan2(dy, dx)
# Next point on path is the goal - use final goal pose
else:
wp_th = self.nav_sp[2]
return wp_x, wp_y, wp_th
def publish_path(self, astar, wp_x, wp_y, wp_th):
"""Publishes single waypoint goal and full path"""
# Publish next waypoint
pose_sp = (wp_x, wp_y, wp_th)
msg = Float32MultiArray()
msg.data = pose_sp
self.pose_sp_pub.publish(msg)
# Publish full path
path_msg = Path()
path_msg.header.frame_id = 'map'
for state in astar.path:
pose_st = PoseStamped()
pose_st.pose.position.x = state[0]
pose_st.pose.position.y = state[1]
pose_st.header.frame_id = 'map'
path_msg.poses.append(pose_st)
self.nav_path_pub.publish(path_msg)
self.prev_nav_sp = self.nav_sp
self.prev_astar = astar
def run(self):
rospy.spin()
def round_pt_to_grid(pt, grid_res):
"""Rounds coordinate point to nearest grid coordinates"""
steps = 1 / grid_res
return tuple([round(coord*steps) / steps for coord in pt])
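# Illustrative check (hypothetical point, using the 0.25 m plan resolution above):
# round_pt_to_grid((1.13, 2.57), 0.25) -> (1.25, 2.5)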
def distance_to_line(start, end, x):
x1,y1 = start
x2,y2 = end
x0,y0 = x
# Distance from x to line defined by 2 points, start/end
# https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line
num = abs((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1)
denom = np.sqrt((y2-y1)**2 + (x2-x1)**2)
dist = num/denom
return dist
if __name__ == '__main__':
nav = Navigator()
nav.run()
| null |
scripts/navigator.py
|
navigator.py
|
py
| 9,701 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "rospy.init_node",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tf.TransformListener",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "rospy.Subscriber",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "nav_msgs.msg.OccupancyGrid",
"line_number": 47,
"usage_type": "argument"
},
{
"api_name": "rospy.Subscriber",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "nav_msgs.msg.MapMetaData",
"line_number": 48,
"usage_type": "argument"
},
{
"api_name": "rospy.Subscriber",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "std_msgs.msg.Float32MultiArray",
"line_number": 49,
"usage_type": "argument"
},
{
"api_name": "rospy.Publisher",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "std_msgs.msg.Float32MultiArray",
"line_number": 51,
"usage_type": "argument"
},
{
"api_name": "rospy.Publisher",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "nav_msgs.msg.Path",
"line_number": 52,
"usage_type": "argument"
},
{
"api_name": "astar.StochOccupancyGrid2D",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "rospy.loginfo",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "rospy.loginfo",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "rospy.loginfo",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "rospy.Time",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "tf.LookupException",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "tf.ConnectivityException",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "tf.ExtrapolationException",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "astar.AStar",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "astar.bufferOccupancy",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "rospy.loginfo",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "astar.solve",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "astar.path",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "rospy.loginfo",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "rospy.logwarn",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "astar.path",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "astar.path",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "astar.path",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "numpy.arctan2",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "std_msgs.msg.Float32MultiArray",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "nav_msgs.msg.Path",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "astar.path",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "geometry_msgs.msg.PoseStamped",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "rospy.spin",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 249,
"usage_type": "call"
}
] |
287205978
|
#!/usr/bin/env python
import gnupg
gpg = gnupg.GPG(gnupghome='/root/.gnupg')
publickey = open('Sylar.pub','rb')
key_data = publickey.read()
publickey.close()
import_result = gpg.import_keys(key_data)
public_keys = gpg.list_keys()
pgpfile = open('/etc/rc.local','rb')
pgpdata = pgpfile.read()
pgpfile.close()
encrypted_ascii_data = gpg.encrypt(pgpdata, 'Sylar', output='./test.gpg')
| null |
gpg文件加密、解密/encry.py
|
encry.py
|
py
| 386 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "gnupg.GPG",
"line_number": 4,
"usage_type": "call"
}
] |
517201907
|
from scipy.io.wavfile import read as wavread
from scipy.io.wavfile import write as wavwrite
import numpy as np
import os.path
# Calculations to determine length of kernel at running_mean for a given cut-off frequency.
def calcH(n, f, fs): # calculates transfer function
w = 2 * np.pi * f / fs
h = 1.0 / n * np.sin(w * n / 2) / np.sin(w / 2)
return h
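# Rough sanity check (approximate, hypothetical values): at fs = 22050 Hz a 5-sample
# moving average barely attenuates a 1 kHz tone, calcH(5, 1000, 22050) ~ 0.92,
# so searchN below must grow n until the response drops to the target H.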
# gets filter width
def searchN(f, H, fs):
for n in range(1, 1000):
HThis = calcH(n, f, fs)
if HThis < H:
break
if np.abs(H - calcH(n, f, fs)) > np.abs(H - calcH(n - 1, f, fs)):
return n - 1
else:
return n
# gets filter width for various frequencies.
def getNs(fs=22050, H=0.5, fArray=np.linspace(100, 5000, 50), verbose=0):
resultDict = {}
for i in range(len(fArray)):
f = fArray[i]
n = searchN(f, H, fs)
if verbose:
print('f: ' + str(f) + '\tN: ' + str(n) + '\t real H: ' + str(calcH(n, f, fs)))
if n not in resultDict.values():
resultDict[f] = n
return resultDict
def running_mean(x, windowSize):
padded = np.zeros((len(x)+windowSize-1,))
padded[int(np.floor((windowSize-1)/2)):-int(np.ceil((windowSize-1)/2))] = x # pads input with 0-s, so output will have the same lengthas input
cumsum = np.cumsum(padded,dtype = float)
cumsum[windowSize:] = cumsum[windowSize:] - cumsum[:-windowSize]
return cumsum[windowSize-1:] / windowSize
# scale to -1.0 -- 1.0
# if x.dtype == 'int16':
# nb_bits = 16 # -> 16-bit wav files
# elif x.dtype == 'int32':
# nb_bits = 32 # -> 32-bit wav files
# max_nb_bit = float(2 ** (nb_bits - 1))
# x = x / (max_nb_bit + 1.0) # samples is a numpy array of float representing the samples
# wavwrite('noisedfiltered1.wav',22050, x)
def getFilteredDataList(inputFileName, load = True, save = True):
frequencyFilterWidthMap = getNs()
result = []
[originalSampleRate, original] = wavread(inputFileName)
for cutOffFrequency in frequencyFilterWidthMap:
outputFileName = inputFileName + '_noisedfiltered_' + str(cutOffFrequency) + '.wav'
if os.path.isfile(outputFileName) and load:
print("Loading file ", outputFileName, " from disk." )
[sampleRate, x] = wavread(outputFileName)
if sampleRate != originalSampleRate:
raise ValueError("Sample rate of file ", outputFileName, " does not eaqual the sample rate of",
" the original file " , inputFileName)
else:
windowSize = frequencyFilterWidthMap[cutOffFrequency]
print('Generating noisedfiltered ', cutOffFrequency, ' data, with windowSize ', windowSize)
x = running_mean(original, windowSize).astype('int16')
x = x + np.random.normal(0, 10, len(x)).astype('int16') # to add noise.
if save:
wavwrite(outputFileName, originalSampleRate, x)
print("saved: ", outputFileName)
if len(x) != len(original):
raise ValueError("Filtering the wav file failed. Original input is ", len(original), " long",
"but the filtered data is " , len(x) , " long.")
result.append(x)
return (result ,originalSampleRate)
| null |
generateWavs.py
|
generateWavs.py
|
py
| 3,310 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.pi",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.sin",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.floor",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.cumsum",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "scipy.io.wavfile.read",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path.path.isfile",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "scipy.io.wavfile.read",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "scipy.io.wavfile.write",
"line_number": 75,
"usage_type": "call"
}
] |
398257788
|
'''
Created on Jun 6, 2016
@author: airingzhang
'''
import numpy as np
import glob as glob
import sys
import os
from deepnet import deepnet_pb2
from google.protobuf import text_format
class FeatureParser (object):
'''
    This file is used for the audio project with Prof. Chen to parse the concatenated features.
    It also provides methods to group features.
'''
def __init__(self):
'''
Constructor
'''
self.featureGroups = ['ZCR', 'Energy', 'Spectral', 'Chroma', 'PLP', 'MFCC']
self.featureGroupsIndex = [0, 401, 1203, 2406, 7218, 12405, 17592]
self.featureGroupsDict = {'ZCR':[(0,399)], 'Energy':[(401, 800), (802,1201)],
'Spectral':[(1203,1602),(1604,2003),(2005, 2404)], 'Chroma': [(2406,7194)],
'PLP':[(7218,12405)], 'MFCC':[(12405,17592)]}
self.subPersonDir = ['P1', 'P1222', 'P176', 'P201', 'P221', 'P241', 'P252', 'P255', 'P3',
'P331', 'P5', 'P599', 'P601', 'P8', 'P9', 'P1001', 'P169', 'P2', 'P21',
'P231', 'P251', 'P253', 'P256', 'P330', 'P4', 'P50', 'P6', 'P7', 'P86']
self.subTypeDir = ['Type1', 'Type2', 'Type3', 'Type4']
self.subOtherDir = ['env', 's']
def ParsePerson(self, baseDir, ne=True, withMS = False):
self.baseDir = baseDir
for person in self.subPersonDir:
instanceCount = 0
dataPb = deepnet_pb2.Dataset()
outputProtoFile = os.path.join(self.baseDir, person,'data.pbtxt')
for i, feature in enumerate(self.featureGroups):
data = deepnet_pb2.Dataset.Data()
data.name = person+"_"+feature
data.file_pattern = "*"+feature+".npy"
if withMS:
data.dimensions.extend([self.featureGroupsIndex[i+1]-self.featureGroupsIndex[i]])
else:
dimensions = 0
for entry in self.featureGroupsDict[feature]:
dimensions = dimensions + entry[1] - entry[0]
data.dimensions.extend([dimensions])
dataPb.data.extend([data])
data = deepnet_pb2.Dataset.Data()
data.name = person+"_label"
data.dimensions.extend([1])
data.file_pattern = "*label.npy"
dataPb.data.extend([data])
dataPb.prefix = os.path.join(self.baseDir, person)
if withMS:
dataPb.name = os.path.basename(baseDir) + "withMS"
outputProtoFile = os.path.join(baseDir, 'data_withMS.pbtxt')
else:
dataPb.name = os.path.basename(baseDir) + "withoutMS"
outputProtoFile = os.path.join(baseDir, 'data_withoutMS.pbtxt')
if ne:
filePath = os.path.join(self.baseDir, person, "*.npy")
files = glob.glob(filePath)
for fileEntry in files:
tempData = np.load(fileEntry)
if tempData.shape[1] == 17593:
continue
instanceCount = instanceCount + tempData.shape[0]
fileName = os.path.splitext(fileEntry)[0]
if withMS:
                        for i, feature in enumerate(self.featureGroups):
np.save(fileName + '_' + feature + "_withMS.npy", tempData[:, self.featureGroupsIndex[i]:self.featureGroupsIndex[i + 1]])
else:
for feature in self.featureGroups:
tempTuple = self.featureGroupsDict[feature][0]
tempArray = tempData[:, tempTuple[0]: tempTuple[1]]
if len(self.featureGroupsDict[feature]) > 1:
for i in range(1, len(self.featureGroupsDict[feature])):
tempTuple = self.featureGroupsDict[feature][i]
tempArray = np.concatenate((tempArray, tempData[:,tempTuple[0]: tempTuple[1]]), axis = 1)
np.save(fileName + '_' + feature + "_withoutMS.npy", tempArray)
np.save(fileName + '_label.npy', tempData[:, 17592])
else:
for fType in self.subTypeDir:
filePath = os.path.join(self.baseDir, person, fType, "*.npy")
files = glob.glob(filePath)
for fileEntry in files:
tempData = np.load(fileEntry)
assert(tempData.shape[1] == 17593)
instanceCount = instanceCount + tempData.shape[0]
baseName = os.path.splitext(os.path.basename(fileEntry))[0]
fileName = os.path.join(self.baseDir, person, baseName)
if withMS:
for i, feature in enumerate(self.featureGroups):
np.save(fileName + '_' + feature + "_withtMS.npy", tempData[:, self.featureGroupsIndex[i]:self.featureGroupsIndex[i + 1]])
else:
for feature in self.featureGroups:
tempTuple = self.featureGroupsDict[feature][0]
tempArray = tempData[:, tempTuple[0]: tempTuple[1]]
if len(self.featureGroupsDict[feature]) > 1:
for i in range(1, len(self.featureGroupsDict[feature])):
tempTuple = self.featureGroupsDict[feature][i]
tempArray = np.concatenate((tempArray, tempData[:,tempTuple[0]: tempTuple[1]]), axis = 1)
np.save(fileName + '_' + feature + "_withoutMS.npy", tempArray)
np.save(fileName + '_label.npy', tempData[:, 17592])
for entry in dataPb.data:
entry.size = instanceCount
with open(outputProtoFile, 'w') as f:
text_format.PrintMessage(dataPb, f)
def ParseOther(self, baseDir, withMS = False):
self.baseDir = baseDir
pathDir = os.path.join(baseDir, "*.npy")
files = glob.glob(pathDir)
instanceCount = 0
dataPb = deepnet_pb2.Dataset()
for i, feature in enumerate(self.featureGroups):
data = deepnet_pb2.Dataset.Data()
data.name = feature + "_"+ os.path.basename(baseDir)
data.file_pattern = "*"+feature+"*.npy"
if withMS:
data.dimensions.extend([self.featureGroupsIndex[i+1]-self.featureGroupsIndex[i]])
else:
dimensions = 0
for entry in self.featureGroupsDict[feature]:
dimensions = dimensions + entry[1] - entry[0]
data.dimensions.extend([dimensions])
dataPb.data.extend([data])
data = deepnet_pb2.Dataset.Data()
data.name = "label_" + os.path.basename(baseDir)
data.dimensions.extend([1])
data.file_pattern = "*label.npy"
dataPb.data.extend([data])
if withMS:
MS = "withMS"
outputProtoFile = os.path.join(baseDir, MS, "data_withMS.pbtxt")
else:
MS = "withoutMS"
outputProtoFile = os.path.join(baseDir, MS, "data_withoutMS.pbtxt")
dataPb.name = os.path.basename(baseDir) + "_"+ MS
dirPath = os.path.join(baseDir, MS)
dataPb.prefix = dirPath
for fileEntry in files:
tempData = np.load(fileEntry)
if len(tempData.shape) == 1 or tempData.shape[1] != 17593:
continue
instanceCount = instanceCount + tempData.shape[0]
baseName = os.path.basename(fileEntry)
fileName = os.path.join(dirPath,os.path.splitext(baseName)[0]) + "_" + MS
np.save(fileName + '_label.npy', tempData[:, 17592])
if withMS:
for i, feature in enumerate(self.featureGroups):
np.save(fileName + '_' + feature + "_withMS.npy", tempData[:, self.featureGroupsIndex[i]:self.featureGroupsIndex[i + 1]])
else:
for feature in self.featureGroups:
tempTuple = self.featureGroupsDict[feature][0]
tempArray = tempData[:, tempTuple[0]: tempTuple[1]]
if len(self.featureGroupsDict[feature]) > 1:
for i in range(1, len(self.featureGroupsDict[feature])):
tempTuple = self.featureGroupsDict[feature][i]
tempArray = np.concatenate((tempArray, tempData[:,tempTuple[0]: tempTuple[1]]), axis = 1)
np.save(fileName + '_' + feature + "_withoutMS.npy", tempArray)
for entry in dataPb.data:
entry.size = instanceCount
with open(outputProtoFile, 'w') as f:
text_format.PrintMessage(dataPb, f)
def main():
baseDir = sys.argv[1]
withMS = False if sys.argv[2].upper() == "FALSE" else True
person = sys.argv[3].upper()
# Default ne to 'TRUE' so ParsePerson still works when the optional 4th argument is omitted.
ne = sys.argv[4].upper() if len(sys.argv) > 4 else 'TRUE'
parser = FeatureParser()
if person == 'FALSE':
parser.ParseOther(baseDir,withMS = withMS)
else:
if ne == 'FALSE':
parser.ParsePerson(baseDir, False, withMS = withMS)
else:
parser.ParsePerson(baseDir, True, withMS = withMS)
if __name__ == '__main__':
main()
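# Example invocation (hypothetical paths/flags; argv order is baseDir, withMS, person, [ne]):
#   python features_parser.py /data/DeepEAR/LOSO_2 FALSE TRUE TRUE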
| null |
deepnet/examples/DeepEAR/LOSO_2_Multimodal_ITF/features_parser.py
|
features_parser.py
|
py
| 9,789 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "deepnet.deepnet_pb2.Dataset",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "deepnet.deepnet_pb2",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "deepnet.deepnet_pb2.Dataset.Data",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "deepnet.deepnet_pb2.Dataset",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "deepnet.deepnet_pb2",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "deepnet.deepnet_pb2.Dataset.Data",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "deepnet.deepnet_pb2.Dataset",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "deepnet.deepnet_pb2",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "numpy.save",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "numpy.save",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "google.protobuf.text_format.PrintMessage",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "google.protobuf.text_format",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "deepnet.deepnet_pb2.Dataset",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "deepnet.deepnet_pb2",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "deepnet.deepnet_pb2.Dataset.Data",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "deepnet.deepnet_pb2.Dataset",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "deepnet.deepnet_pb2",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "os.path.basename",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "deepnet.deepnet_pb2.Dataset.Data",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "deepnet.deepnet_pb2.Dataset",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "deepnet.deepnet_pb2",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "os.path.basename",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "google.protobuf.text_format.PrintMessage",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "google.protobuf.text_format",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 185,
"usage_type": "attribute"
}
] |
9390248
|
import requests
import json
import time
import calendar
import logging
with open('config.json') as fp:
CONFIG = json.load(fp)
client_id = str(CONFIG['client_id'])
USERLIST_API = "http://tmi.twitch.tv/group/user/{}/chatters"
def get_current_users(ch, user_type='all'):
url = USERLIST_API.format(ch)
r = requests.get(url).json()
if user_type == 'all':
try:
all_users = set(sum(r['chatters'].values(), []))
except Exception as e:
logging.info("{}".format(r))
logging.exception("msg in another thread:")
all_users = set()
return all_users
elif user_type in ['moderators', 'staff', 'admins', 'global_mods', 'viewers']:
users = set(r['chatters'][user_type])
return users
else:
return set()
def get_stream_status(ch):
global client_id
url = 'https://api.twitch.tv/kraken/streams/' + ch
headers = {'Accept': 'application/vnd.twitchtv.v3+json', 'Client-ID': client_id}
r = requests.get(url, headers=headers)
info = json.loads(r.text)
# is_live, _id, created_at_ts, game, n_user
if info['stream'] is None:
return (False, 0, 0, "", 0)
else:
n_user = info['stream']['viewers']
game = info['stream']['game']
_id = info['stream']['_id']
created_at_str = info['stream']['created_at']
created_at_struct = time.strptime(created_at_str, "%Y-%m-%dT%H:%M:%SZ")
created_at_ts = calendar.timegm(created_at_struct)
return (True, _id, created_at_ts, game, n_user)
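# Sketch of intended use (the channel name is hypothetical):
#   users = get_current_users('somechannel', user_type='viewers')
#   is_live, stream_id, started_ts, game, n_viewers = get_stream_status('somechannel')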
| null |
twitch_utils.py
|
twitch_utils.py
|
py
| 1,560 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "logging.exception",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "time.strptime",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "calendar.timegm",
"line_number": 45,
"usage_type": "call"
}
] |
292755576
|
from dashboard.models import *
from datetime import datetime
def weight_distribution():
distribution ={
'over_weight':0,
'under_weight':0,
'normal':0
}
cattle_objs = Cattle.objects.all()
for cattle in cattle_objs:
weight = DailyWeight.objects.all().order_by('date_time').last()
birth_date = cattle.birth_date
today = datetime.now().date()
age_months = (today.year - birth_date.year)*12 + (today.month - birth_date.month)
age_range = AgeRange.objects.filter(start_range__gte=age_months,end_range__lte=age_months+6)
print(age_months, age_range)
return distribution
| null |
instaweight/dashboard/graph_utils.py
|
graph_utils.py
|
py
| 634 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 17,
"usage_type": "name"
}
] |
330831633
|
# Author: Jean-Remi King <[email protected]>
#
# Licence : GNU GPLv3
import numpy as np
from sklearn.base import BaseEstimator
from joblib import Parallel, delayed
class SelfRegression(BaseEstimator):
""" Fit a series of regressors that aim at predicting each feature when
the latter is hidden from the regressors.
Parameters
----------
estimator : sklearn regressor | None
The regressor. Defaults to LinearRegression()
n_jobs : int
The number of parallel cores.
Attributes
----------
estimators_ : array, shape (n_feature)
The array of fitted estimator for each feature.
y_pred_ : array, shape(n_samples, n_feature)
The predictions.
"""
def __init__(self, estimator=None, n_jobs=-1):
from mne.parallel import check_n_jobs
from sklearn.linear_model import LinearRegression
self.estimator = LinearRegression() if estimator is None else estimator
self.n_jobs = n_jobs = check_n_jobs(n_jobs)
def fit(self, X):
"""Fits a regressor for each feature.
Parameters
----------
X : array, shape (n_sample, n_feature)
The data.
"""
from sklearn.base import clone
n_sample, self.n_feature_ = X.shape
# Setup parallel
n_splits = n_jobs = np.min([self.n_jobs, self.n_feature_])
parallel = Parallel(n_jobs)
p_func = delayed(_fit_loop)
# Split chunks of features to avoid overheads
splits = np.array_split(np.arange(self.n_feature_), n_splits)
out = parallel(p_func([clone(self.estimator) for f in split], X, split)
for split in splits)
self.estimators_ = np.concatenate(out, axis=0)
def predict(self, X):
"""Predict all features.
Parameters
----------
X : array, shape (n_sample, n_feature)
The data.
Returns
-------
X_pred : array, shape(n_sample, n_feature)
"""
n_sample, n_feature = X.shape
if n_feature != self.n_feature_:
raise ValueError('X must have same dims in fit and predict.')
n_splits = n_jobs = np.min([self.n_jobs, self.n_feature_])
parallel = Parallel(n_jobs)
p_func = delayed(_predict_loop)
splits = np.array_split(np.arange(n_feature), n_splits)
y_pred = parallel(p_func(self.estimators_[split], X, split)
for split in splits)
self.y_pred_ = np.hstack(y_pred)
return self.y_pred_
def _fit_loop(estimators, X, split):
"""Auxiliary functions of SelfRegression"""
_, n_feature = X.shape
for feature, estimator in zip(split, estimators):
features = np.delete(np.arange(n_feature), feature)
estimator.fit(X[:, features], y=X[:, feature])
return estimators
def _predict_loop(estimators, X, split):
"""Auxiliary functions of SelfRegression"""
n_sample, n_feature = X.shape
y_pred = np.zeros((n_sample, len(split)))
for f_idx, (feature, estimator) in enumerate(zip(split, estimators)):
features = np.delete(np.arange(n_feature), feature)
y_pred[:, f_idx] = estimator.predict(X[:, features])
return y_pred
def detect_bad_channels(raw, estimator=None, n_train=10000, n_test=10000,
n_jobs=-1, picks=None):
"""This example shows how EEG/MEG bad channel detection can be done by
trying to predict the value of each channel of each time point from the
activity of all other channels at the corresponding time points.
Indeed, knowing the high spatial correlation of EEG/MEG signals, a given
channel can be considered as noisy if it doesn't (anti)correlate with any
other channels.
Note that:
- this doesn't work for intracranial EEG, where the spatial
correlation is much smaller.
- this method isn't ideal to identify bad timing. For this, I would
recommend Alex. Barachant's Potato algorithm available at
http://github.com/abarachant/pyRiemann
"""
from mne import pick_types
from sklearn.preprocessing import RobustScaler
# Subsample times for faster computation
# Note that, considering that n_sample >> n_feature, a real cross-
# validation isn't really necessary
times = np.arange(len(raw.times))
np.random.shuffle(times)
times = times[:(n_train + n_test)]
if picks is None:
picks = pick_types(raw.info, meg=True, eeg=True)
X = raw._data[picks, :][:, times].T.copy()
# To be consistent across chan types, we'll normalize the data:
X = RobustScaler().fit_transform(X)
n_time, n_chan = X.shape
# Fit
art = SelfRegression(estimator=estimator, n_jobs=n_jobs)
art.fit(X[:n_train, :])
Xpred = art.predict(X[-n_test:, :])
# Score
errors = (Xpred-X[-n_test:, :]) ** 2
return errors
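# Sketch of intended use (assumes `raw` is an already-loaded mne.io.Raw object; the
# 3-sigma threshold below is an illustrative assumption, not part of this module):
#   errors = detect_bad_channels(raw, n_train=10000, n_test=10000)
#   chan_err = errors.mean(axis=0)
#   bad_idx = np.where(chan_err > chan_err.mean() + 3 * chan_err.std())[0]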
def remove_linenoise(raw, noise_freq, width=2, shuffle_time=True, decim=100,
n_component=1, plot=False, copy=True, picks=None,
harmonics=True):
import matplotlib.pyplot as plt
from mne import pick_types
from mne.preprocessing import ICA
from mne.time_frequency.psd import psd_welch
# Setup line frequency
if isinstance(noise_freq, str):
# automatic harmonics
if noise_freq == 'us':
noise_freq = 60
else:
noise_freq = 50
elif not isinstance(noise_freq, (float, int)):
raise NotImplementedError('Multiple bands')
def plot_psd(psd, freqs, ax, title):
for psd_ in psd:
ax.plot(freqs, np.log10(psd_))
ax.set_xlabel('Frequencies')
ax.set_title(title)
if copy:
raw = raw.copy()
if picks is None:
picks = pick_types(raw.info, eeg=True, meg=True, seeg=True)
if plot:
fig, axes = plt.subplots(1, 3, sharex=True)
psd, freqs = psd_welch(raw, picks=picks)
plot_psd(psd, freqs, axes[0], 'Raw Sensors')
# Fit ICA on filtered data
raw_ = raw.copy()
if harmonics:
# set up harmonics
n_harm = raw_.info['sfreq'] // (2. * noise_freq) + 1
harmonics = noise_freq * np.arange(1, n_harm)
# Band pass filtering outside lowest harmonics and nquist
raw_.filter(noise_freq - width, harmonics[-1] + width)
# Band stop filter in between harmonics
raw_.notch_filter(freqs=harmonics[:-1]+noise_freq//2,
notch_widths=noise_freq - 2*width)
else:
raw_.filter(noise_freq-width, noise_freq+width)
# Shuffle time axis to avoid decimation aliasing
if shuffle_time:
time = np.arange(raw_.n_times)
np.random.shuffle(time)
raw_._data[:, time] = raw_._data
ica = ICA(verbose=False)
ica.fit(raw_, decim=decim, picks=picks)
# Compute PSD of components
raw_._data[picks, :] = np.dot(ica.mixing_matrix_, raw._data[picks, :])
psd, freqs = psd_welch(raw_, picks=picks)
if plot:
plot_psd(psd, freqs, axes[1], 'Components')
# Find noise component and remove
freq = np.where(freqs >= noise_freq)[0][0]
sel = np.argsort(psd[:, freq])[-n_component:].tolist()
raw_ = ica.apply(raw, exclude=sel, copy=True)
if plot:
psd, freqs = psd_welch(raw_, picks=picks)
plot_psd(psd, freqs, axes[2], 'Clean sensors')
return raw_
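# Hypothetical call, assuming 50 Hz mains noise on a European recording:
#   raw_clean = remove_linenoise(raw, noise_freq=50, n_component=1, plot=False)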
def find_reference(raw, n_cluster, pick_types=None, copy=True,
flat_threshold=1e-15, n_split=100, plot=True):
""" Computes covariance on splits of the raw data, and apply KMeans
clustering to find the number of disjoint references.
n_cluster is found with PCA if float
"""
import matplotlib.pyplot as plt
from pyriemann.estimation import Covariances
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import pairwise_distances
if copy:
raw = raw.copy()
# Remove flat lines
flat = np.where(np.std(raw._data, axis=1) < flat_threshold)[0]
for ch in flat:
raw.info['bads'] += [raw.ch_names[ch]]
# Pick data channels only
if pick_types is None:
pick_types = dict(seeg=True, exclude='bads')
raw.pick_types(**pick_types)
# Compute covariance on data splits
n_time = len(raw.times)
t_max = raw.times[n_time - n_time % n_split - 1]
raw.crop(0, t_max, copy=False) # ensure regularly sized splits
X = np.array(np.array_split(raw._data, n_split, axis=1))
covs = Covariances().fit_transform(X)
# Compute cluster for each data split
cluster = KMeans(n_cluster)
all_kmeans = list()
for cov in covs:
dist = pairwise_distances(cov)
all_kmeans.append(cluster.fit_predict(dist))
# Combine clusters
dist = pairwise_distances(np.array(all_kmeans).T)
idx = cluster.fit_predict(dist)
if plot:
idx_ = np.argsort(idx)
cov = np.median(covs, axis=0)
plt.matshow(np.log10(cov)[idx_, :][:, idx_])
clusters = [np.array(raw.ch_names)[idx == ii] for ii in np.unique(idx)]
return clusters
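# Hypothetical call, grouping sEEG channels into two reference clusters:
#   clusters = find_reference(raw, n_cluster=2, plot=False)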
| null |
jr/meg/artefact.py
|
artefact.py
|
py
| 9,082 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sklearn.base.BaseEstimator",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "mne.parallel.check_n_jobs",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "joblib.Parallel",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "joblib.delayed",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.array_split",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "sklearn.base.clone",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "joblib.Parallel",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "joblib.delayed",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.array_split",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.delete",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.delete",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "mne.pick_types",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.RobustScaler",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "{'check_n_jobs': 'mne.parallel.check_n_jobs', 'LinearRegression': 'sklearn.linear_model.LinearRegression', 'clone': 'sklearn.base.clone'}",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "mne.pick_types",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "mne.time_frequency.psd.psd_welch",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "mne.preprocessing.ICA",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "mne.time_frequency.psd.psd_welch",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "mne.time_frequency.psd.psd_welch",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "mne.pick_types",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "mne.pick_types",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "mne.pick_types",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "numpy.array_split",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "pyriemann.estimation.Covariances",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.pairwise.pairwise_distances",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.pairwise.pairwise_distances",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.matshow",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "numpy.log10",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 265,
"usage_type": "call"
}
] |
477219843
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
import seaborn as sns
from ipywidgets import interactive, FloatSlider, fixed, Text, BoundedIntText
from ipywidgets import Layout, Dropdown
from outbreak_modelling import *
def sims_to_longform(sims):
"""
Convert one or more simulations to long-form format for plotting
with seaborn. The input `sims` should take the form of a dict:
keys should be strings labelling the simulations; values should
be the sim DataFrames themselves.
"""
result = pd.concat({k: v.rename_axis('PROJECTION', axis=1).stack().rename('Value')
for k, v in sims.items()})
result.index.rename('SIMULATION NAME', level=0, inplace=True)
result = result.to_frame().reset_index()
result['PROJECTION'] = result['PROJECTION'].astype('category')
return result
def pivot_plot_data(plot_data):
plot_data = plot_data.set_index(['Date', 'SIMULATION NAME', 'PROJECTION'])
plot_data = plot_data.squeeze().rename(None)
plot_data = plot_data.unstack(['SIMULATION NAME', 'PROJECTION'])
return plot_data
def plot_simulations(sims, observations, ax=None):
if ax is None:
_, ax = plt.subplots(1, 1, figsize=(12, 8))
plt.sca(ax)
plot_data = sims_to_longform(sims)
sns.lineplot(data=plot_data,
x='Date', y='Value', hue='PROJECTION',
style='SIMULATION NAME',
hue_order=['All cases', 'All deaths',
'Daily cases', 'Daily deaths'],
dashes = ['', (2, 4)])
ax.plot([], [], ' ', label='OBSERVATIONS')
ax.set_prop_cycle(None)
observations[['All cases', 'All deaths', 'Daily new cases',
'Daily deaths']].plot(ax=ax, marker='o', ls='',
markersize=2)
ax.set_yscale('log')
ax.set_ylim(1, None)
ax.yaxis.set_major_locator(ticker.LogLocator(10., (1.,), 15, 15))
ax.yaxis.set_minor_locator(ticker.LogLocator(10., range(10), 15, 15))
ax.yaxis.set_major_formatter(ticker.StrMethodFormatter('{x:,.0f}'))
ax.grid(which='minor', axis='y', alpha=0.2)
ax.legend().set_title('')
ax.legend()
ax.set_ylabel('')
ax.set_xlabel('Date')
#plt.xticks(rotation=90)
return pivot_plot_data(plot_data)
def explore_simulation(initial_growth_rate,
serial_interval,
latent_fraction,
f_cdr, f_cfr,
T_detect, T_resolve, T_death,
cv_detect, cv_resolve, cv_death,
R_0_lockdown,
lockdown_release_date,
lockdown_release_timeframe_weeks,
sim_time_weeks,
weights,
observations):
initial_growth_rate /= 100
f_cdr /= 100
f_cfr /= 100
cv_detect /= 100
cv_resolve /= 100
cv_death /= 100
try:
lockdown_release_date = pd.to_datetime(lockdown_release_date)
except (TypeError, ValueError) as e:
print('Error understanding lockdown release date:\n')
print(e)
return
lockdown_release_end = (lockdown_release_date +
pd.to_timedelta(7*lockdown_release_timeframe_weeks,
'D'))
if lockdown_release_date < pd.to_datetime('2020/04/01'):
print('Lockdown cannot be released before April 2020')
return
unc_model = SEIRObsModel(f_cdr, f_cfr, cv_detect, T_detect, cv_resolve,
T_resolve, cv_death, T_death,
start_date = "2020/02/01",
early_growth_rate = initial_growth_rate,
mean_generation_time = serial_interval,
lat_fraction = latent_fraction,
initial_state = SEIR.make_state(S=6.64e7, I=1))
unc_model.fit(observations['All cases'][observations['phase']=='unrestricted'],
observations['All recovered'][observations['phase']=='unrestricted'],
observations['All deaths'][observations['phase']=='unrestricted'],
7*sim_time_weeks, weights=weights)
sim_baseline = unc_model.predict(7*sim_time_weeks)
ld_model = SEIRObsModel(f_cdr, f_cfr, cv_detect, T_detect, cv_resolve,
T_resolve, cv_death, T_death,
start_date = unc_model.start_date,
early_growth_rate = initial_growth_rate,
mean_generation_time = serial_interval,
lat_fraction = latent_fraction,
initial_state = SEIR.make_state(S=6.64e7, I=1))
R_0_ld = piecewise_linear_R_0_profile(['2020/03/10', '2020/03/26',
lockdown_release_date,
lockdown_release_end],
[unc_model.R_0(0),
R_0_lockdown, R_0_lockdown,
unc_model.R_0(0)],
sim_baseline)
ld_model.R_0 = R_0_ld
sim_ld = ld_model.predict(7*sim_time_weeks)
assert ((sim_baseline.index - sim_ld.index).to_series().dt.days==0).all()
_, (axt, axb) = plt.subplots(2, 1, figsize=(12, 16),
gridspec_kw={'height_ratios': [1, 3]})
plt.sca(axt)
plt.plot(sim_baseline.index, R_0_ld(range(len(sim_baseline))))
plt.ylim(0, None)
plt.ylabel('$R_0(t)$')
plt.xlabel('Date')
plt.xticks(rotation=90)
plt.title('$R_0$ profile')
plot_data = plot_simulations({'UK lockdown': sim_ld,
'Unconstrained baseline': sim_baseline},
observations,
ax=axb)
axb.set_title('Projections')
for ax in (axt, axb):
ax.axvspan(pd.to_datetime('2020/03/10'),
pd.to_datetime('2020/03/26'), color='orange', alpha=0.03)
ax.axvspan(pd.to_datetime('2020/03/26'),
lockdown_release_date, color='r', alpha=0.04)
ax.axvspan(lockdown_release_date, lockdown_release_end, color='g', alpha=0.05)
plt.subplots_adjust(hspace=0.5)
plot_data.to_csv('simdata/last-simulation.csv')
return plot_data
def my_slider(value, mymin, mymax, step, description):
return FloatSlider(value=value, min=mymin, max=mymax, step=step,
description=description,
layout=Layout(width='500px'),
style={'description_width': 'initial'})
def my_text_box(value, mymin, mymax, step, description):
return BoundedIntText(value=value, min=mymin, max=mymax, step=step,
description=description,
style={'description_width': 'initial'})
def interactive_simulation(observations):
return interactive(explore_simulation,
{'manual':True},
initial_growth_rate = my_slider(26, 5, 50, 1, 'Initial growth rate, %'),
serial_interval = my_slider(6.5, 2, 10, 0.5, 'Mean serial interval, days'),
latent_fraction = my_slider(0.71, 0.1, 0.9, 0.1, 'Latent period fraction'),
f_cdr = my_slider(4.4, 0.1, 10, 0.1, 'Case detection rate, %'),
f_cfr = my_slider(33, 1, 100, 1, 'Case fatality rate, %'),
T_detect = my_slider(11, 1, 30, 1, 'Time to detection, days'),
T_resolve = my_slider(9, 1, 30, 1, 'Time to recovery, days'),
T_death = my_slider(10, 1, 56, 1, 'Time to death, days'),
cv_detect = my_slider(33, 1, 99, 1, 'Detection time variability, %'),
cv_resolve = my_slider(33, 1, 99, 1, 'Recovery time variability, %'),
cv_death = my_slider(20, 1, 99, 1, 'Death time variability, %'),
R_0_lockdown = my_slider(1.2, 0.1, 4, 0.1, '$R_0$ during lockdown'),
lockdown_release_date = Text(value='2020/06/30',
description='Lockdown release date',
style={'description_width': 'initial'}),
lockdown_release_timeframe_weeks = my_text_box(26, 1, 9999, 1, 'Number of weeks for lockdown release'),
sim_time_weeks = my_text_box(52, 1, 999, 1, 'Simulation length, weeks'),
weights = Dropdown(options=[('Cases', [1, 0, 0]),
('Deaths', [0, 0, 1]),
('Cases & deaths',
[.5, 0, .5])],
description='Fit to ',
style={'description_width': 'initial'}),
observations = fixed(observations))
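# Typical notebook use (assumes `observations` is a DataFrame indexed by date with the
# 'All cases', 'All recovered', 'All deaths' and 'phase' columns referenced above):
#   from IPython.display import display
#   display(interactive_simulation(observations))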
| null |
plotutils.py
|
plotutils.py
|
py
| 9,178 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.concat",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.sca",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "seaborn.lineplot",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.ticker.LogLocator",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.ticker",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "matplotlib.ticker.LogLocator",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.ticker",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "matplotlib.ticker.StrMethodFormatter",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.ticker",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "pandas.to_datetime",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pandas.to_timedelta",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.sca",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "pandas.to_datetime",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots_adjust",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "ipywidgets.FloatSlider",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "ipywidgets.Layout",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "ipywidgets.BoundedIntText",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "ipywidgets.interactive",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "ipywidgets.Text",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "ipywidgets.Dropdown",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "ipywidgets.fixed",
"line_number": 183,
"usage_type": "call"
}
] |
132788144
|
" I am a generic padding oracle exploit written using the Twisted framework "
from twisted.internet import defer
from random import choice
import sys
ASCII = list(map(chr, range(256)))  # materialise as a list so reverse() and indexing also work on Python 3
ASCII.reverse()
def xor(a, b):
return ''.join([chr(ord(x) ^ ord(y)) for x, y in zip(a, b)])
class POExploit:
" I am the padding oracle exploit "
def __init__(self, oracle, size=16):
self.oracle = oracle
self.size = size
@defer.inlineCallbacks
def decrypt(self, encrypted):
" I simply decrypt the encrypted text without the key "
blocks = [encrypted[i:i+self.size]
for i in range(0, len(encrypted), self.size)]
decrypted = yield defer.DeferredList(
[self.decrypt_block(blocks[i-1], blocks[i])
for i in range(1, len(blocks))])
defer.returnValue(''.join([x[1] for x in decrypted]))
@defer.inlineCallbacks
def decrypt_block(self, previous, block):
" I decrypt a block in CBC mode given the previous block "
image = ""
for n in range(self.size):
while True:
payload = choice(ASCII)
build = yield defer.DeferredList(
[self.oracle(payload * (self.size - n - 1) + i + \
xor(image, chr(n + 1) * n) + block)
for i in ASCII])
match = [ASCII[i] for i in range(256) if build[i][1] == True]
if len(match) == 1:
sys.stdout.write('.')
sys.stdout.flush()
image = xor(chr(n+1), match[0]) + image
break
defer.returnValue(xor(previous, image))
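# Sketch of intended use; `oracle` is an assumed user-supplied callable that takes a
# ciphertext string and returns a Deferred firing True when the server accepts the padding:
#   exploit = POExploit(oracle, size=16)
#   d = exploit.decrypt(ciphertext)
#   d.addCallback(lambda plaintext: sys.stdout.write('\n' + plaintext + '\n'))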
| null |
poracle.py
|
poracle.py
|
py
| 1,714 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "twisted.internet.defer.DeferredList",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "twisted.internet.defer",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "twisted.internet.defer.returnValue",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "twisted.internet.defer",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "twisted.internet.defer.inlineCallbacks",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "twisted.internet.defer",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "twisted.internet.defer.DeferredList",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "twisted.internet.defer",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "sys.stdout.write",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "twisted.internet.defer.returnValue",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "twisted.internet.defer",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "twisted.internet.defer.inlineCallbacks",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "twisted.internet.defer",
"line_number": 29,
"usage_type": "name"
}
] |
618447371
|
import sqlite3
from public import website
from flask import g
DATABASE = 'db/message-board.db'
def connect_db():
db = sqlite3.connect(DATABASE)
db.row_factory = convert_row_to_dictionary
return db
def init_db():
with website.app_context():
db = get_db()
with website.open_resource('/db/schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = connect_db()
return db
@website.before_request
def before_request():
g.db = connect_db()
@website.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
def query_db(query, args=(), one=False):
try:
cur = get_db().execute(query, args)
get_db().commit()
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
except sqlite3.Error as er:
print('er:', er)
return None
def convert_row_to_dictionary(cursor, row):
return dict((cursor.description[idx][0], value)
for idx, value in enumerate(row))
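# Example query (the 'messages' table is a hypothetical name defined by schema.sql):
#   row = query_db('SELECT * FROM messages WHERE id = ?', (1,), one=True)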
| null |
assets/projects/message-board/public/datamanager.py
|
datamanager.py
|
py
| 1,211 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sqlite3.connect",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "public.website.app_context",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "public.website",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "public.website.open_resource",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "public.website",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "flask.g",
"line_number": 26,
"usage_type": "argument"
},
{
"api_name": "flask.g._database",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "flask.g.db",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "public.website.before_request",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "public.website",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "flask.g",
"line_number": 41,
"usage_type": "argument"
},
{
"api_name": "public.website.teardown_request",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "public.website",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "sqlite3.Error",
"line_number": 54,
"usage_type": "attribute"
}
] |
206451197
|
from django.shortcuts import get_object_or_404, render,redirect,get_list_or_404
from tly_shopingcart.models import *
from .models import *
from django.core.exceptions import ObjectDoesNotExist
# Create your views here.
def cart_details(request,tot=0,count=0,cart_itmes=None):
ct_itmes=cart_itmes # fall back to the argument so the render() call below still works if the cart lookup fails
try:
ct=cartlist.objects.get(cart_id=c_id(request))
ct_itmes=item.objects.filter(cart=ct,active=True)
for i in ct_itmes:
tot+=(i.prodt.price*i.quan)
count+=i.quan
except ObjectDoesNotExist:
pass
return render(request,'cart.html',{'ct':ct_itmes,'t':tot,'cn':count})
def c_id(request):
ct_id=request.session.session_key
if not ct_id:
request.session.create() # create() returns None, so re-read the new session key instead of returning its result
ct_id=request.session.session_key
return ct_id
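# add_cart / min_cart / cart_delete below share the same pattern: resolve the session cart
# via c_id(), then create, increment, decrement or delete the item row for the given product.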
def add_cart(request,product_id):
prod=product.objects.get(id=product_id)
try:
ct=cartlist.objects.get(cart_id=c_id(request))
except cartlist.DoesNotExist:
ct=cartlist.objects.create(cart_id=c_id(request))
ct.save()
try:
c_items=item.objects.get(prodt=prod,cart=ct)
if c_items.quan < c_items.prodt.stock:
c_items.quan+=1
c_items.save()
except item.DoesNotExist:
c_items=item.objects.create(prodt=prod,quan=1,cart=ct)
c_items.save()
return redirect('cart_details')
def min_cart(request,product_id):
ct=cartlist.objects.get(cart_id=c_id(request))
prod=get_object_or_404(product,id=product_id)
c_items=item.objects.get(prodt=prod,cart=ct)
if c_items.quan>1:
c_items.quan -=1
c_items.save()
else:
c_items.delete()
return redirect('cart_details')
def cart_delete(request,product_id):
ct=cartlist.objects.get(cart_id=c_id(request))
prod=get_object_or_404(product,id=product_id)
c_items=item.objects.get(prodt=prod,cart=ct)
c_items.delete()
return redirect('cart_details')
| null |
add_cart/views.py
|
views.py
|
py
| 1,888 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.core.exceptions.ObjectDoesNotExist",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 56,
"usage_type": "call"
}
] |
66824095
|
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2008 Frank Niessink <[email protected]>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import wx
from taskcoachlib import patterns
class ViewerContainer(object):
''' ViewerContainer is a container of viewers. It has a containerWidget
that displays the viewers. The containerWidget can be a notebook or
an AUI managed frame. The ViewerContainer knows which of its viewers
is active and dispatches method calls to the active viewer or to the
first viewer that can handle the method. This allows other GUI
components, e.g. menu's, to talk to the ViewerContainer as were
it a regular viewer. '''
def __init__(self, containerWidget, settings, setting, *args, **kwargs):
self.containerWidget = containerWidget
self.bindContainerWidgetEvents()
self._settings = settings
self.__setting = setting
self.viewers = []
self.__currentPageNumber = 0
# Prepare for an exception, because this setting used to be a string
try:
self.__desiredPageNumber = int(self._settings.get('view', setting))
except ValueError:
self.__desiredPageNumber = 0
super(ViewerContainer, self).__init__(*args, **kwargs)
def bindContainerWidgetEvents(self):
eventsAndHandlers = dict(pageClosedEvent=self.onPageClosed,
pageChangedEvent=self.onPageChanged)
for event, handler in eventsAndHandlers.items():
if hasattr(self.containerWidget, event):
self.containerWidget.Bind(getattr(self.containerWidget, event),
handler)
def __getitem__(self, index):
return self.viewers[index]
def addViewer(self, viewer):
self.containerWidget.AddPage(viewer, viewer.title(), viewer.bitmap())
self.viewers.append(viewer)
if len(self.viewers) - 1 == self.__desiredPageNumber:
# We need to use CallAfter because the AuiNotebook doesn't allow
# PAGE_CHANGING events while the window is not active. See
# widgets/notebook.py
wx.CallAfter(self.containerWidget.SetSelection,
self.__desiredPageNumber)
patterns.Publisher().registerObserver(self.onSelect,
eventType=viewer.selectEventType(), eventSource=viewer)
@classmethod
def selectEventType(class_):
return '%s.select'%class_
@classmethod
def viewerChangeEventType(class_):
return '%s.viewerChange'%class_
def __getattr__(self, method):
''' Return a function that will call the method on the first viewer
that both has the requested method and does not raise an exception.
Start looking in the current viewer. NB: this auto forwarding only
works for methods, not for properties. '''
def findFirstViewer(*args, **kwargs):
for viewer in [self.activeViewer()] + self.viewers:
if hasattr(viewer, method):
return getattr(viewer, method)(*args, **kwargs)
else:
raise AttributeError
return findFirstViewer
def activeViewer(self):
''' Return the active viewer, i.e. the viewer that has the focus. '''
# We try to find the active viewer by starting with the window
# that has the focus and then see whether that window is a viewer
# or a child of a viewer
windowWithFocus = wx.Window.FindFocus()
while windowWithFocus:
for viewer in self.viewers:
if viewer == windowWithFocus:
self.__currentPageNumber = self.viewers.index(windowWithFocus)
return viewer
windowWithFocus = windowWithFocus.Parent
# If there is no viewer (or child of a viewer) that has the focus
# we return the viewer that was last active
return self.viewers[self.__currentPageNumber]
def __del__(self):
pass # Don't forward del to one of the viewers.
def onSelect(self, event):
patterns.Publisher().notifyObservers(patterns.Event(self,
self.selectEventType(), *event.values()))
def onPageChanged(self, event):
self._changePage(event.Selection)
event.Skip()
def onPageClosed(self, event):
try: # Notebooks and similar widgets:
viewer = self.viewers[event.Selection]
except AttributeError: # Aui managed frame:
if event.GetPane().IsToolbar():
return
viewer = event.GetPane().window
# When closing an AUI managed frame, we get two close events,
# be prepared:
if viewer in self.viewers:
self._closePage(viewer)
if self.__currentPageNumber >= len(self.viewers):
self._changePage(0)
event.Skip()
def _closePage(self, viewer):
self.viewers.remove(viewer)
viewer.detach()
setting = viewer.__class__.__name__.lower() + 'count'
viewerCount = self._settings.getint('view', setting)
self._settings.set('view', setting, str(viewerCount-1))
def _changePage(self, pageNumber):
self.__currentPageNumber = pageNumber
self._settings.set('view', self.__setting, str(pageNumber))
patterns.Publisher().notifyObservers(patterns.Event(self,
self.viewerChangeEventType(), pageNumber))
| null |
branches/Release0_72_Branch/taskcoachlib/gui/viewer/container.py
|
container.py
|
py
| 6,275 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "wx.CallAfter",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "taskcoachlib.patterns.Publisher",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "taskcoachlib.patterns",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "wx.Window.FindFocus",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "wx.Window",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "taskcoachlib.patterns.Publisher",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "taskcoachlib.patterns",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "taskcoachlib.patterns.Event",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "taskcoachlib.patterns.Publisher",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "taskcoachlib.patterns",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "taskcoachlib.patterns.Event",
"line_number": 142,
"usage_type": "call"
}
] |
424712386
|
from django.urls import path
from article import views
app_name = 'article'
urlpatterns = [
path('', views.article, name='article'),
path('articleCreate/', views.articleCreate, name='articleCreate'),
path('articleRead/<int:articleId>/', views.articleRead, name='articleRead'),
path('articleUpdate/<int:articleId>/', views.articleUpdate, name='articleUpdate'),
path('articleDelete/<int:articleId>/', views.articleDelete, name='articleDelete'),
path('articleSearch/', views.articleSearch, name='articleSearch'),
]
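# With app_name = 'article' these routes reverse by namespace, e.g.
# reverse('article:articleRead', args=[42]); the URL prefix depends on the project
# urls.py that includes this module.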
| null |
blog/article/urls.py
|
urls.py
|
py
| 538 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "article.views.article",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "article.views",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "article.views.articleCreate",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "article.views",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "article.views.articleRead",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "article.views",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "article.views.articleUpdate",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "article.views",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "article.views.articleDelete",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "article.views",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "article.views.articleSearch",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "article.views",
"line_number": 12,
"usage_type": "name"
}
] |
85853855
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import from_levels_and_colors
from mpl_toolkits.basemap import Basemap, cm
import Nio # use PyNIO for netCDF data I/O, instead of scipy.io
theta_sat_data = np.genfromtxt(
'/Users/wusun/Dropbox/IGBP_SOILS/local_output/ThetaS.srf', usecols=0)
#theta_sat_file = open('/Users/wusun/Dropbox/IGBP_SOILS/local_output/ThetaS.srf', 'r')
# #theta_sat_file.readline()
# for i, line in enumerate(theta_sat_file):
# if i == 40536:
# print line
# elif i > 233530:
# break
nx = 4320
ny = 1686
theta_sat = np.array(theta_sat_data).reshape((ny,nx))
theta_sat[np.where(theta_sat < 0)] = np.nan
lat_coords = np.arange(84, -56.5, -1./12.) # arange doesn't include endpoint
lon_coords = np.arange(-180, 180, 1./12.)
lons, lats = np.meshgrid(lon_coords,lat_coords)
theta_sat_masked = np.ma.masked_where(np.isnan(theta_sat),theta_sat)
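# The 1686 x 4320 grid is a 1/12-degree global raster spanning 84N to -56.5N latitude and
# -180 to 180 longitude; negative porosity values in the source file are treated as missing.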
# save as netCDF file using PyNIO
theta_sat_nc_file = Nio.open_file(
'/Users/wusun/Dropbox/Projects/Regional COS flux/data/theta_sat.nc', 'c')
theta_sat_nc_file.create_dimension('lat', 1686)
theta_sat_nc_file.create_dimension('lon', 4320)
nc_latitudes = theta_sat_nc_file.create_variable('lat', 'd', ('lat',))
nc_longitudes = theta_sat_nc_file.create_variable('lon', 'd', ('lon',))
nc_latitudes[:] = lat_coords
nc_longitudes[:] = lon_coords
nc_theta_sat = theta_sat_nc_file.create_variable('theta_sat', 'd',
('lat','lon',))
nc_theta_sat[:,:] = theta_sat
# setting attributes
theta_sat_nc_file.title = 'Surface soil porosity map (0-30 cm)'
theta_sat_nc_file.references = 'IGBP DIS'
nc_latitudes.long_name = 'latitude'
nc_latitudes.standard_name = 'latitude'
nc_latitudes.units = 'degrees_north'
nc_latitudes.axis = 'y'
nc_latitudes._CoordinateAxisType = 'Lat'
nc_longitudes.long_name = 'longitude'
nc_longitudes.standard_name = 'longitude'
nc_longitudes.units = 'degrees_east'
nc_longitudes.axis = 'x'
nc_longitudes._CoordinateAxisType = 'Lon'
nc_theta_sat.long_name = 'Surface soil porosity'
nc_theta_sat.standard_name = 'Surface soil porosity'
nc_theta_sat.units = 'm3/m3'
nc_theta_sat.grid_mapping = ''
theta_sat_nc_file.close()
m = Basemap(projection='cea',resolution='l')
#m.fillcontinents(color='#ffffff',lake_color='#00ccff')
m.drawlsmask(land_color='#ffffff',ocean_color='#00ccff',lakes=True)
m.drawmeridians(np.arange(0,360,30))
m.drawparallels(np.arange(-90,90,30))
theta_sat_map = m.pcolormesh(lons,lats,theta_sat_masked,shading='flat',
cmap=plt.cm.BrBG,latlon=True)
cb = m.colorbar(theta_sat_map,"bottom")
m.drawcoastlines(linewidth=0.25)
#plt.savefig('/Users/wusun/Dropbox/Projects/SMOS/theta_sat_map.pdf', dpi=300)
plt.savefig('/Users/wusun/Dropbox/Projects/Regional COS flux/plots/theta_sat_map.png', dpi=300)
#plt.show()
plt.clf()
m2 = Basemap(projection='cea',llcrnrlat=32,urcrnrlat=42,\
llcrnrlon=-125,urcrnrlon=-114,lat_ts=30,resolution='h')
m2.drawlsmask(land_color='#ffffff',ocean_color='#00ccff',lakes=True)
m2.drawcountries(linewidth=1.5)
m2.drawstates(linewidth=1)
theta_sat_map2 = m2.pcolormesh(lons,lats,theta_sat_masked,shading='flat',
cmap=plt.cm.BrBG,latlon=True)
cb2 = m2.colorbar(theta_sat_map2,"right")
m2.drawcoastlines(linewidth=0.5)
plt.savefig('/Users/wusun/Dropbox/Projects/Regional COS flux/plots/theta_sat_map_CA.png', dpi=300)
| null |
plot_theta_sat.py
|
plot_theta_sat.py
|
py
| 3,290 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.genfromtxt",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.ma.masked_where",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.ma",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "numpy.isnan",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "Nio.open_file",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "mpl_toolkits.basemap.Basemap",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.cm",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "mpl_toolkits.basemap.Basemap",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.cm",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 82,
"usage_type": "name"
}
] |
229896601
|
import json
from django.views.generic import View
from js_logger.forms import ErrorForm
from js_logger import models
from django import http
def get_ip(request):
"""
Get IP address from request
"""
ip = request.META.get('REMOTE_ADDR') or \
models.Error._meta.get_field('ip').default
if ip and ip.startswith('::ffff:'):
ip = ip[7:]
return ip
class LogErrorView(View):
"""
Log javascript error to database
"""
def post(self, *args, **kwargs):
"""
Create log item
"""
form = ErrorForm(self.request.POST)
if not form.is_valid():
return http.HttpResponse(
json.dumps(form.errors),
content_type='application/json',
status=400
)
obj = form.save()
obj.user = self.request.user
obj.ip = get_ip(self.request)
obj.save()
return http.HttpResponse(
json.dumps({"id": obj.pk}),
content_type='application/json'
)
| null |
js_logger/views.py
|
views.py
|
py
| 1,046 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "js_logger.models.Error._meta.get_field",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "js_logger.models.Error",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "js_logger.models",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.views.generic.View",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "js_logger.forms.ErrorForm",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 43,
"usage_type": "call"
}
] |
334349232
|
# Functions for the itemset mining.
import csv
import pandas as pd
import numpy as np
from collections import defaultdict
import matplotlib.pyplot as plt
import networkx as nx
from networkx.algorithms.approximation import clique
# FUNCTION THAT TURNS A DICTIONARY INTO GRAPH IF VALUES ARE EQUAL
def GraphDict(names):
graph = defaultdict(list)
for key, values in names.items():
listed = []
for key2, values2 in names.items():
if values == values2:
listed.append(key2)
else:
continue
listed = listed[1:]
graph[key] = listed
return graph
# GENERATE EDGES FOR THE GRAPH
def generate_edges(graph):
edges = []
# for each node in graph
for node in graph:
# for each neighbour node of a single node
for neighbour in graph[node]:
# if edge exists then append
edges.append((node, neighbour))
return edges
# SHORTEST PATH FROM ONE RECIPE TO ANOTHER
def find_shortest_path(graph, start, end, path =[]):
path = path + [start]
if start == end:
return path
shortest = None
for node in graph[start]:
if node not in path:
newpath = find_shortest_path(graph, node, end, path)
if newpath:
if not shortest or len(newpath) < len(shortest):
shortest = newpath
return shortest
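# Load the Aeropress recipe CSV; the graph/clique analysis below is left commented out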
with open("/Users/harryritchie/Documents/Aeropress16/Coffee_17.csv") as file:
read_data = csv.reader(file,delimiter=';')
names = {}
for row in read_data:
print(row)
# GRAPH
# GRAPH = GraphDict(names)
# G = nx.Graph(GRAPH)
# MAX CLIQUE WITH MINED SETTINGS SUP > 0.5
# print(clique.max_clique(G))
# PLOT GRAPH
# pos=nx.spring_layout(G)
# nx.draw_networkx(G,pos)
# plt.show()
| null |
coffee_analysis.py
|
coffee_analysis.py
|
py
| 1,820 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.defaultdict",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 59,
"usage_type": "call"
}
] |
611519610
|
#Tkinter has different import names on different versions.
import tkinter
from tkinter import *
from tkinter import Frame
import tkinter.scrolledtext as tkst
from tkinter import filedialog, messagebox
#These are all standard libs
import subprocess, shlex, sys
from threading import Thread
from multiprocessing import Queue
from time import sleep
#This is a local lib
import config
class RedirectText(object):
""""""
#----------------------------------------------------------------------
def __init__(self, text_ctrl):
"""Constructor"""
self.output = text_ctrl
self.fileno = sys.stdout.fileno
#----------------------------------------------------------------------
def write(self, string):
""""""
self.output.insert(tkinter.INSERT, string + "\n")
class window(tkinter.Frame):
def __init__(self, root):
        #Defining a few convenience constants.
# options for buttons
#button_style = {'fill': tkconstants.BOTH, 'padx': 5, 'pady': 5}
# define options for opening or saving a file
self.root = root; #Need a pointer to this for later
#TODO: need to find a 'nicer' way to kill the app outside of tkinter's scope.
self.file_opt = options = {}
options['filetypes'] = [('all files', '.*'), ('text files', '.txt')]
options['initialdir'] = config.jmxFolderPath
options['parent'] = self.root
options['title'] = 'select a jmx file: '
# setting up the Tkinter frame
#root.geometry('{}x{}'.format(800, 600))
self.frame = tkinter.Frame(root);
self.frame.grid(column=0,row=0)
self.frame.columnconfigure(0,weight=1)
self.frame.rowconfigure(0,weight=1)
self.console = tkst.ScrolledText(self.frame)
self.console.grid(row=3, column=1, columnspan=6)
self.re = RedirectText(self.console)
sys.stdout = self.re
#Define the Menu bar
menubar=tkinter.Menu(self.frame)
filemenu = tkinter.Menu(menubar, tearoff=0)
filemenu.add_command(label="Open JMX File...", command=self.getFilename)
filemenu.add_command(label="Save", command=self.hello)
filemenu.add_separator()
filemenu.add_command(label="Restart pyJmx", command=self.restart)
filemenu.add_command(label="Exit", command=self.root.quit)
menubar.add_cascade(label="File", menu=filemenu)
runMenu = tkinter.Menu(menubar, tearoff=0)
runMenu.add_command(label="Jmeter GUI", command=self.hello)
runMenu.add_command(label="JMX Test via Console", command=self.hello)
runMenu.add_command(label="run Selected JMX", command=self.runJmeter)
menubar.add_cascade(label="Run...", menu=runMenu)
root.config(menu=menubar)
# define buttons and labels
self.currentFile = tkinter.StringVar()
self.currentFile.set('[No file Chosen]')
#Tkinter.Label(self.)
fileInput = tkinter.Entry(self.frame, textvariable=self.currentFile, width=40)
fileInput.grid(row=2, column=1, columnspan=5)
tkinter.Button(self.frame, text='Run Test', command=self.runJmeter).grid(row=2, column=5, sticky=tkinter.E)
# Initialize the Terminal
self.process= subprocess.Popen("python --version", shell=True)
def getFilename(self):
# get filename
        filename = filedialog.askopenfilename(**self.file_opt)
if(filename != ''):
self.currentFile.set(filename)
def runJmeter(self):
subprocess.call(config.setPathCommand, shell=True)
jmxName = self.currentFile.get()
if(jmxName == '' or jmxName == '[No file Chosen]'):
self.userAlert("No jmx file is chosen yet.")
else:
jmxName=jmxName.replace('/','\\')
# Comment out for Windows:
#jmxName=jmxName.replace(' ','\ ')
print(jmxName)
# TODO: Display this output on the UI in a buffered text box, while still outputting to a time-stamped text file.
cmdStr = config.jMeterPath+'jmeter.bat -n -t ' + jmxName + ' -l testResults.jtl'
print(cmdStr)
#This method doesn't lock the UI, but only works on Linux
#cmdStr = shlex.split(cmdStr)
self.process = subprocess.Popen(cmdStr, shell=True, stdout=subprocess.PIPE )
print(self.process.communicate())
#TODO: Stream the jmeter.log instead of console stdout
def userAlert(self, message):
        messagebox.showinfo("Alert:", message)
def restart(self):
subprocess.Popen('python App.py',shell=True)
self.root.quit()
def hello(self):
        messagebox.showinfo("PyJMX","This feature is not implemented yet")
| null |
pyJmx-master/PyJmx.py
|
PyJmx.py
|
py
| 4,203 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.stdout",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "tkinter.INSERT",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Frame",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "config.jmxFolderPath",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Frame",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "tkinter.scrolledtext.ScrolledText",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "tkinter.scrolledtext",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "sys.stdout",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Menu",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "tkinter.Menu",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "tkinter.Menu",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "tkinter.StringVar",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "tkinter.Entry",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "tkinter.E",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "config.setPathCommand",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "config.jMeterPath",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 120,
"usage_type": "call"
}
] |
162199989
|
import networkx as nx
import matplotlib.pyplot as plt
from utils import draw_automat
from utils import get_active_states
class State(object):
def __init__(self, name='X', prefix=set(), suffix=set(),transition = {},pre_states = {}):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.transition = transition
self.pre_states = pre_states
def add_prefix(self, pre):
self.prefix.add(pre)
def add_suffix(self,suff):
self.suffix.add(suff)
def add_transition(self,char,state):
self.transition[char] = state
def merge_state(state1,state2,automata):
# state2.name += "-" + state1.name
state2.prefix = state2.prefix.union(state1.prefix)
state2.suffix = state2.suffix.union(state1.suffix)
if state2.name == 'Start':
state2.prefix = set()
for t in state1.transition:
state2.transition[t] = state1.transition[t]
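    # Re-wire every predecessor of state1 so its transition now points to the merged state2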
for p in state1.pre_states:
state2.pre_states[p] = state1.pre_states[p]
automata.states[p].transition[state1.pre_states[p]] = state2
print('done merging')
return state2
def merge_state_prefix(state1,state2,automata):
# state2.name += "-" + state1.name
state2.prefix = state2.prefix.union(state1.prefix)
state2.suffix = state2.suffix.union(state1.suffix)
if state2.name == 'Start':
state2.prefix = set()
# for t in state1.transition:
# state2.transition[t] = state1.transition[t]
for p in state1.pre_states:
state2.pre_states[p] = state1.pre_states[p]
automata.states[p].transition[state1.pre_states[p]] = state2
print('done merging')
return state2
class Automata:
# Initializer / Instance Attributes
def __init__(self, start_state=None, accepted_states=[]):
self.start_state = start_state
self.states = {self.start_state.name:self.start_state}
self.accepted_states = accepted_states
def add_states(self,state):
self.states[state.name] = state
def set_start_state(self,start_state_name):
self.start_state = start_state_name
def set_accepted_states(self,accepted_states_name):
self.accepted_states = accepted_states_name
def add_transition(self,from_state,char,to_state):
if from_state.name not in self.states:
self.states[from_state.name] = {}
if to_state.name not in self.states:
self.states[to_state.name] = {}
self.states[from_state.name][char] = to_state
def proceed(self,w):
current_state = self.start_state
for c in w:
next_state = self.states[current_state][c]
current_state = next_state
return current_state
def is_accepted(self,state_name):
return state_name in self.accepted_states
def draw(self):
pass
def pick_min_state(stateList):
minState = stateList[0]
for state in stateList:
if len(state.prefix) + len(state.suffix) < len(minState.prefix) + len(minState.suffix):
minState = state
return minState
def pick_min_state1(state1,state2):
# len1 = len(state1.prefix) + len(state1.suffix)
# len2 = len(state2.prefix) + len(state2.suffix)
minpref1, minsuff1, minpref2, minsuff2 = 0,0,0,0
if len(state1.prefix) >0 :
minpref1 = min([len(pref) for pref in state1.prefix])
if len(state1.suffix) >0 :
minsuff1 = min([len(suff) for suff in state1.suffix])
if len(state2.prefix) >0 :
minpref2 = min([len(pref) for pref in state2.prefix])
if len(state2.suffix) >0 :
minsuff2 = min([len(suff) for suff in state2.suffix])
if minpref1 + minsuff1 < minpref2 + minsuff2:
return state2, state1
else:
return state1,state2
if __name__ == "__main__":
inpf = open('path_file.txt','r')
path_list = [line for line in inpf]
path_list_sorted = sorted(path_list, key=len)
outopf = open('path_file_sorted.txt','w')
for p in path_list_sorted:
p1 = p.replace(".","-").strip("-")[:-2] + "\n"
outopf.write(p1)
outopf.close()
inpf = open('path_file_sorted.txt', 'r')
prefixDict = {}
suffDict = {}
suffDictSet = {}
count = 1
print('---start state---')
startState = State(name='Start',prefix=set(), suffix=set(),transition = {},pre_states = {})
myAutomat = Automata(start_state=startState)
newLine = True
for line in inpf:
print('line = ',line)
newLine = False
# line1 = line.split(".")
stateTrans = line[2:].strip().split("-")
# pre = stateTrans[0]
# suff = "".join(stateTrans[1:])
startState.add_suffix("-".join(stateTrans[:]))
suffDictSet = {k: v for k, v in suffDictSet.items() if v != startState.name}
suffDictSet[frozenset(startState.suffix)] = startState.name
# prevState = State(name=str(count), prefix=set(pre), suffix=set(suff),pre_states={startState.name: [pre]})
# myAutomat.add_transition(startState,pre,prevState)
myAutomat.add_states(startState)
prevState = startState
# draw_automat(myAutomat)
def shortest_pre(state):
min_pre = 99999
res = ""
for pre in state.prefix:
if len(pre) < min_pre:
min_pre = len(pre)
res = pre
return res
for i in range(1,len(stateTrans)):
# pre = "-".join(stateTrans[:i])
if prevState.name == 'Start':
pre = stateTrans[i - 1]
else:
pre = shortest_pre(prevState) + "-" + stateTrans[i - 1]
suff = "-".join(stateTrans[i:])
stateName = str(count)
if i == len(stateTrans) - 1:
stateName = 'P'
currState = State(name=stateName,prefix=set(), suffix=set(),transition = {},pre_states = {})
currState.add_prefix(pre)
currState.add_suffix(suff)
prevState.add_transition(stateTrans[i - 1],currState)
currState.pre_states = {prevState.name:stateTrans[i - 1]}
# if suff == 'P':
# currState.add_transition(stateTrans[i],currState)
myAutomat.add_states(currState)
mergeSuffix = False
mergedState = currState
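            # If another state already owns this prefix, merge the current state into it (prefix merge)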
if pre in prefixDict:
# prefixDict[pre].append(count)
#---merge---
# mergeToState = pick_min_state(currState,pre)
# prefixDict[pre] = str(mergeToState.name)
state1,state2 = pick_min_state1(currState,myAutomat.states[prefixDict[pre]])
mergedState = merge_state_prefix(state1,state2,myAutomat)
currState = mergedState
suffDictSet = {k: v for k, v in suffDictSet.items() if v != state1.name}
prefixDict = {k: v for k, v in prefixDict.items() if v != state1.name}
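            # If some state has exactly the same suffix set, merge on suffixes as well; the rest of this path is then skipped (see the break below)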
if frozenset(currState.suffix) in suffDictSet:
# mergeToState = pick_min_state([currState] + suffDict[suff])
# suffDict[suff] = str(mergeToState.name)
state1,state2 = pick_min_state1(currState,myAutomat.states[suffDictSet[frozenset(currState.suffix)]])
if state1.name == state2.name:
prevState = state1
count += 1
continue
mergeSuffix = True
mergedState = merge_state(state1,state2, myAutomat)
active_states = get_active_states(myAutomat)
currState = mergedState
suffDictSet = {k: v for k, v in suffDictSet.items() if v in active_states}
prefixDict = {k: v for k, v in prefixDict.items() if v in active_states}
# prefixDict[pre] = mergedState.name
for pre in mergedState.prefix:
prefixDict[pre] = mergedState.name
suffDict[suff] = mergedState.name
suffDictSet = {k:v for k, v in suffDictSet.items() if v != mergedState.name}
suffDictSet[frozenset(currState.suffix)] = mergedState.name
prevState = mergedState
count+=1
# draw_automat(myAutomat)
if mergeSuffix:
break
# if newLine:
# break
# draw_automat(myAutomat)
print('Done')
| null |
lstm_exp5/gen_automat1.py
|
gen_automat1.py
|
py
| 7,159 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "utils.get_active_states",
"line_number": 210,
"usage_type": "call"
}
] |
337418143
|
# -*- coding: utf-8 -*-
from collections import namedtuple
Label = namedtuple(
'Label', ['id', 'categoryId', 'label', 'name'])
sign_instructive_labels = {
Label(0, 0, "i1", u"步行"),
Label(1, 1, "i2", u"非机动车行驶"),
Label(2, 2, "i3", u"环岛行驶"),
Label(3, 3, "i4", u"机动车行驶"),
Label(4, 4, "i5", u"靠右侧道路行驶"),
Label(5, 5, "i6", u"靠左侧道路行驶"),
Label(6, 6, "i7", u"立体交叉直行和右转弯行驶"),
Label(7, 7, "i8", u"立体交叉直行和左转弯行驶"),
Label(8, 8, "i9", u"鸣喇叭"),
Label(9, 9, "i10", u"向右转弯"),
Label(10, 10, "i11", u"向左和向右转弯"),
Label(11, 11, "i12", u"向左转弯"),
Label(12, 12, "i13", u"直行"),
Label(13, 13, "i14", u"直行和向右转弯"),
Label(14, 14, "i15", u"直行和向左转弯"),
Label(15, 15, "i16", u"最低限速"),
Label(16, 16, "i17", u"人行横道"),
Label(17, 17, "i18", u"单行路直行"),
Label(18, 18, "i19", u"路口优先通行"),
Label(19, 19, "i20", u"允许掉头"),
Label(20, 20, "i21", u"会车先行"),
Label(21, 21, "i22", u"右转车道"),
Label(22, 22, "i23", u"左转车道"),
Label(23, 23, "i24", u"直行车道"),
Label(24, 24, "i25", u"直行和右转何用车道"),
Label(25, 25, "i26", u"直行和左转何用车道"),
Label(26, 26, "i27", u"掉头车道"),
Label(27, 27, "i28", u"掉头和左转合用车道"),
Label(28, 28, "i29", u"分向行驶车道"),
Label(29, 29, "i30", u"公交专用车道"),
Label(30, 30, "i31", u"机动车车道"),
Label(31, 31, "i32", u"非机动车车道"),
Label(32, 32, "i33", u"快速公交系统专用车道"),
Label(33, 33, "i34", u"多成员车辆专用道"),
Label(34, 34, "i35", u"停车位"),
Label(35, 35, "i36", u"特殊停车位"),
}
sign_warning_labels = {
Label(0, 0, "w1", u"傍山险路"),
Label(1, 1, "w2", u"村庄"),
Label(2, 2, "w3", u"堤坝路"),
Label(3, 3, "w4", u"注意分离式道路"),
Label(4, 4, "w5", u"渡口"),
Label(5, 5, "w6", u"两侧变窄"),
Label(6, 6, "w7", u"左侧变窄"),
Label(7, 7, "w8", u"右侧变窄"),
Label(8, 8, "w9", u"窄桥"),
Label(9, 9, "w10", u"注意落石"),
Label(10, 10, "w11", u"反向弯路"),
Label(11, 11, "w12", u"过水路面"),
Label(12, 12, "w13", u"十字路口"),
Label(13, 13, "w14", u"十字交叉路口"),
Label(14, 14, "w15", u"Y形交叉路口"),
Label(15, 15, "w16", u"T形交叉路口"),
Label(16, 16, "w17", u"环形交叉路口"),
Label(17, 17, "w18", u"连续弯路"),
Label(18, 18, "w19", u"连续下坡"),
Label(19, 19, "w20", u"路面不平"),
Label(20, 20, "w21", u"注意雨雪天气"),
Label(21, 21, "w22", u"路面低洼"),
Label(22, 22, "w23", u"路面高突"),
Label(23, 23, "w24", u"慢行"),
Label(24, 24, "w25", u"上陡坡"),
Label(25, 25, "w26", u"下陡坡"),
Label(26, 26, "w27", u"施工"),
Label(27, 27, "w28", u"事故易发路段"),
Label(28, 28, "w29", u"双向交通"),
Label(29, 29, "w30", u"注意野生动物"),
Label(30, 30, "w31", u"隧道"),
Label(31, 31, "w32", u"隧道开车灯"),
Label(32, 32, "w33", u"驼峰桥"),
Label(33, 33, "w34", u"无人看守铁路道口"),
Label(34, 34, "w35", u"有人看守铁道路口"),
Label(35, 35, "w36", u"叉形符号"),
Label(36, 36, "w37", u"斜杠符号"),
Label(37, 37, "w38", u"向右急弯路"),
Label(38, 38, "w39", u"向左急弯路"),
Label(39, 39, "w40", u"易滑"),
Label(40, 40, "w41", u"注意信号灯"),
Label(41, 41, "w42", u"注意障碍物左侧通行"),
Label(42, 42, "w43", u"注意障碍物两侧通行"),
Label(43, 43, "w44", u"注意障碍物右侧通行"),
Label(44, 44, "w45", u"注意保持车距"),
Label(45, 45, "w46", u"注意不利气象条件"),
Label(46, 46, "w47", u"注意残疾人"),
Label(47, 47, "w48", u"注意潮汐车道"),
Label(48, 48, "w49", u"注意雾天"),
Label(49, 49, "w50", u"注意儿童"),
Label(50, 50, "w51", u"注意行人"),
Label(51, 51, "w52", u"注意非机动车"),
Label(52, 52, "w53", u"注意左侧合流"),
Label(53, 53, "w54", u"注意右侧合流"),
Label(54, 54, "w55", u"注意横向风"),
Label(55, 55, "w56", u"注意路面结冰"),
Label(56, 56, "w57", u"注意危险"),
Label(57, 57, "w58", u"注意牲畜"),
Label(58, 58, "w59", u"注意前方车辆排队"),
Label(59, 59, "w60", u"建议速度"),
Label(60, 60, "w61", u"避险车道"),
}
sign_prohibition_labels = {
Label(0, 0, "p1", u"禁止超车"),
Label(1, 1, "p2", u"解除禁止超车"),
Label(2, 2, "p3", u"禁止畜力车驶入"),
Label(3, 3, "p4", u"禁止大型客车驶入"),
Label(4, 4, "p5", u"禁止电动三轮车驶入"),
Label(5, 5, "p6", u"禁止掉头"),
Label(6, 6, "p7", u"禁止非机动车驶入"),
Label(7, 7, "p8", u"禁止载货汽车左转"),
Label(8, 8, "p9", u"禁止挂车半挂车驶入"),
Label(9, 9, "p10", u"禁止行人进入"),
Label(10, 10, "p11", u"禁止机动车驶入"),
Label(11, 11, "p12", u"禁止鸣喇叭"),
Label(12, 12, "p13", u"禁止摩托车驶入"),
Label(13, 13, "p14", u"禁止某两种车驶入"),
Label(14, 14, "p15", u"禁止直行"),
Label(15, 15, "p16", u"禁止人力车驶入"),
Label(16, 16, "p17", u"禁止人力货运三轮车驶入"),
Label(17, 17, "p18", u"禁止人力客运三轮车驶入"),
Label(18, 18, "p19", u"禁止三轮汽车、低速货车驶入"),
Label(19, 19, "p20", u"禁止右转弯"),
Label(20, 20, "p21", u"禁止左右转弯"),
Label(21, 21, "p22", u"禁止直行和右转弯"),
Label(22, 22, "p23", u"禁止向左转弯"),
Label(23, 23, "p24", u"禁止小客车右转"),
Label(24, 24, "p25", u"禁止小型客车驶入"),
Label(25, 25, "p26", u"禁止载货汽车驶入"),
Label(26, 26, "p27", u"禁止运载危险物品车辆驶入"),
Label(27, 27, "p28", u"禁止直行或向左转弯"),
Label(28, 28, "p29", u"禁止拖拉机驶入"),
Label(29, 29, "p30", u"限制轴重"),
Label(30, 30, "p31", u"禁止通行"),
Label(31, 31, "p32", u"停车检查"),
Label(32, 32, "p33", u"海关"),
Label(33, 33, "p34", u"会车让行"),
Label(34, 34, "p35", u"减速让行"),
Label(35, 35, "p36", u"限制高度"),
Label(36, 36, "p37", u"限制速度"),
Label(37, 37, "p38", u"限制质量"),
Label(38, 38, "p39", u"区域禁止停车"),
Label(39, 39, "p40", u"禁止驶入"),
Label(40, 40, "p41", u"禁止长时间停车"),
Label(41, 41, "p42", u"解除限制速度"),
Label(42, 42, "p43", u"停车让行"),
Label(43, 43, "p44", u"限制宽度"),
Label(44, 44, "p45", u"禁止载货货车及拖拉机左转弯"),
Label(45, 45, "p46", u"禁止停车"),
Label(46, 46, "p47", u"区域限制速度"),
Label(47, 47, "p48", u"区域限制速度解除"),
Label(48, 48, "p49", u"区域禁止长时间停车"),
}
LabelID = namedtuple(
'LabelID', ['label', 'categoryId', 'name'])
# sign_total_labels = {
# LabelID("i0", 0, u"其他"),
# LabelID("w0", 0, u"其他"),
# LabelID("p0", 0, u"其他"),
# LabelID("i1", 1, u"步行"),
# LabelID("i2", 1, u"非机动车行驶"),
# LabelID("i3", 1, u"环岛行驶"),
# LabelID("i4", 1, u"机动车行驶"),
# LabelID("i5", 2, u"靠右侧道路行驶"),
# LabelID("i6", 2, u"靠左侧道路行驶"),
# LabelID("i7", 2, u"立体交叉直行和右转弯行驶"),
# LabelID("i8", 2, u"立体交叉直行和左转弯行驶"),
# LabelID("i9", 1, u"鸣喇叭"),
# LabelID("i10", 2, u"向右转弯"),
# LabelID("i11", 2, u"向左和向右转弯"),
# LabelID("i12", 2, u"向左转弯"),
# LabelID("i13", 2, u"直行"),
# LabelID("i14", 2, u"直行和向右转弯"),
# LabelID("i15", 2, u"直行和向左转弯"),
# LabelID("i16", 2, u"最低限速"),
# LabelID("i17", 2, u"人行横道"),
# LabelID("i18", 2, u"单行路直行"),
# LabelID("i19", 2, u"路口优先通行"),
# LabelID("i20", 2, u"允许掉头"),
# LabelID("i21", 2, u"会车先行"),
# LabelID("i22", 3, u"右转车道"),
# LabelID("i23", 3, u"左转车道"),
# LabelID("i24", 3, u"直行车道"),
# LabelID("i25", 3, u"直行和右转何用车道"),
# LabelID("i26", 3, u"直行和左转何用车道"),
# LabelID("i27", 3, u"掉头车道"),
# LabelID("i28", 3, u"掉头和左转合用车道"),
# LabelID("i29", 3, u"分向行驶车道"),
# LabelID("i30", 3, u"公交专用车道"),
# LabelID("i31", 3, u"机动车车道"),
# LabelID("i32", 3, u"非机动车车道"),
# LabelID("i33", 3, u"快速公交系统专用车道"),
# LabelID("i34", 3, u"多成员车辆专用道"),
# LabelID("i35", 3, u"停车位"),
# LabelID("i36", 3, u"特殊停车位"),
# LabelID("w1", 4, u"傍山险路"),
# LabelID("w2", 4, u"村庄"),
# LabelID("w3", 4, u"堤坝路"),
# LabelID("w4", 4, u"注意分离式道路"),
# LabelID("w5", 4, u"渡口"),
# LabelID("w6", 4, u"两侧变窄"),
# LabelID("w7", 4, u"左侧变窄"),
# LabelID("w8", 4, u"右侧变窄"),
# LabelID("w9", 4, u"窄桥"),
# LabelID("w10", 4, u"注意落石"),
# LabelID("w11", 4, u"反向弯路"),
# LabelID("w12", 4, u"过水路面"),
# LabelID("w13", 5, u"十字路口"),
# LabelID("w14", 5, u"十字交叉路口"),
# LabelID("w15", 5, u"Y形交叉路口"),
# LabelID("w16", 5, u"T形交叉路口"),
# LabelID("w17", 5, u"环形交叉路口"),
# LabelID("w18", 5, u"连续弯路"),
# LabelID("w19", 5, u"连续下坡"),
# LabelID("w20", 5, u"路面不平"),
# LabelID("w21", 5, u"注意雨雪天气"),
# LabelID("w22", 5, u"路面低洼"),
# LabelID("w23", 5, u"路面高突"),
# LabelID("w24", 6, u"慢行"),
# LabelID("w25", 6, u"上陡坡"),
# LabelID("w26", 6, u"下陡坡"),
# LabelID("w27", 6, u"施工"),
# LabelID("w28", 6, u"事故易发路段"),
# LabelID("w29", 6, u"双向交通"),
# LabelID("w30", 6, u"注意野生动物"),
# LabelID("w31", 6, u"隧道"),
# LabelID("w32", 6, u"隧道开车灯"),
# LabelID("w33", 6, u"驼峰桥"),
# LabelID("w34", 6, u"无人看守铁路道口"),
# LabelID("w35", 6, u"有人看守铁道路口"),
# LabelID("w36", 7, u"叉形符号"),
# LabelID("w37", 7, u"斜杠符号"),
# LabelID("w38", 7, u"向右急弯路"),
# LabelID("w39", 7, u"向左急弯路"),
# LabelID("w40", 7, u"易滑"),
# LabelID("w41", 8, u"注意信号灯"),
# LabelID("w42", 8, u"注意障碍物左侧通行"),
# LabelID("w43", 8, u"注意障碍物两侧通行"),
# LabelID("w44", 8, u"注意障碍物右侧通行"),
# LabelID("w45", 8, u"注意保持车距"),
# LabelID("w46", 8, u"注意不利气象条件"),
# LabelID("w47", 8, u"注意残疾人"),
# LabelID("w48", 8, u"注意潮汐车道"),
# LabelID("w49", 8, u"注意雾天"),
# LabelID("w50", 8, u"注意儿童"),
# LabelID("w51", 8, u"注意行人"),
# LabelID("w52", 8, u"注意非机动车"),
# LabelID("w53", 9, u"注意左侧合流"),
# LabelID("w54", 9, u"注意右侧合流"),
# LabelID("w55", 8, u"注意横向风"),
# LabelID("w56", 8, u"注意路面结冰"),
# LabelID("w57", 8, u"注意危险"),
# LabelID("w58", 8, u"注意牲畜"),
# LabelID("w59", 8, u"注意前方车辆排队"),
# LabelID("w60", 8, u"建议速度"),
# LabelID("w61", 8, u"避险车道"),
# LabelID("p1", 10, u"禁止超车"),
# LabelID("p2", 10, u"解除禁止超车"),
# LabelID("p3", 11, u"禁止畜力车驶入"),
# LabelID("p4", 11, u"禁止大型客车驶入"),
# LabelID("p5", 11, u"禁止电动三轮车驶入"),
# LabelID("p6", 11, u"禁止掉头"),
# LabelID("p7", 11, u"禁止非机动车驶入"),
# LabelID("p8", 13, u"禁止载货汽车左转"),
# LabelID("p9", 11, u"禁止挂车半挂车驶入"),
# LabelID("p10", 12, u"禁止行人进入"),
# LabelID("p11", 11, u"禁止机动车驶入"),
# LabelID("p12", 10, u"禁止鸣喇叭"),
# LabelID("p13", 11, u"禁止摩托车驶入"),
# LabelID("p14", 11, u"禁止某两种车驶入"),
# LabelID("p15", 13, u"禁止直行"),
# LabelID("p16", 11, u"禁止人力车驶入"),
# LabelID("p17", 11, u"禁止人力货运三轮车驶入"),
# LabelID("p18", 11, u"禁止人力客运三轮车驶入"),
# LabelID("p19", 11, u"禁止三轮汽车、低速货车驶入"),
# LabelID("p20", 13, u"禁止右转弯"),
# LabelID("p21", 13, u"禁止左右转弯"),
# LabelID("p22", 13, u"禁止直行和右转弯"),
# LabelID("p23", 13, u"禁止向左转弯"),
# LabelID("p24", 13, u"禁止小客车右转"),
# LabelID("p25", 11, u"禁止小型客车驶入"),
# LabelID("p26", 11, u"禁止载货汽车驶入"),
# LabelID("p27", 11, u"禁止运载危险物品车辆驶入"),
# LabelID("p28", 13, u"禁止直行或向左转弯"),
# LabelID("p29", 11, u"禁止拖拉机驶入"),
# LabelID("p30", 14, u"限制轴重"),
# LabelID("p31", 15, u"禁止通行"),
# LabelID("p32", 15, u"停车检查"),
# LabelID("p33", 15, u"海关"),
# LabelID("p34", 15, u"会车让行"),
# LabelID("p35", 15, u"减速让行"),
# LabelID("p36", 16, u"限制高度"),
# LabelID("p37", 17, u"限制速度"),
# LabelID("p38", 14, u"限制质量"),
# LabelID("p39", 15, u"区域禁止停车"),
# LabelID("p40", 15, u"禁止驶入"),
# LabelID("p41", 15, u"禁止长时间停车"),
# LabelID("p42", 17, u"解除限制速度"),
# LabelID("p43", 15, u"停车让行"),
# LabelID("p44", 14, u"限制宽度"),
# LabelID("p45", 13, u"禁止载货货车及拖拉机左转弯"),
# LabelID("p46", 15, u"禁止停车"),
# LabelID("p47", 17, u"区域限制速度"),
# LabelID("p48", 17, u"区域限制速度解除"),
# LabelID("p49", 15, u"区域禁止长时间停车"),
# }
sign_total_labels = {
LabelID("i0", 0, u"其他"),
LabelID("w0", 0, u"其他"),
LabelID("p0", 0, u"其他"),
LabelID("i1", 1, u"步行"),
LabelID("i2", 1, u"非机动车行驶"),
LabelID("i3", 1, u"环岛行驶"),
LabelID("i4", 2, u"机动车行驶"),
LabelID("i5", 3, u"靠右侧道路行驶"),
LabelID("i6", 3, u"靠左侧道路行驶"),
LabelID("i7", 3, u"立体交叉直行和右转弯行驶"),
LabelID("i8", 3, u"立体交叉直行和左转弯行驶"),
LabelID("i9", 3, u"鸣喇叭"),
LabelID("i10", 4, u"向右转弯"),
LabelID("i11", 4, u"向左和向右转弯"),
LabelID("i12", 5, u"向左转弯"),
LabelID("i13", 6, u"直行"),
LabelID("i14", 6, u"直行和向右转弯"),
LabelID("i15", 6, u"直行和向左转弯"),
LabelID("i16", 6, u"最低限速"),
LabelID("i17", 6, u"人行横道"),
LabelID("i18", 6, u"单行路直行"),
LabelID("i19", 6, u"路口优先通行"),
LabelID("i20", 7, u"允许掉头"),
LabelID("i21", 7, u"会车先行"),
LabelID("i22", 7, u"右转车道"),
LabelID("i23", 7, u"左转车道"),
LabelID("i24", 8, u"直行车道"),
LabelID("i25", 9, u"直行和右转何用车道"),
LabelID("i26", 9, u"直行和左转何用车道"),
LabelID("i27", 9, u"掉头车道"),
LabelID("i28", 9, u"掉头和左转合用车道"),
LabelID("i29", 10, u"分向行驶车道"),
LabelID("i30", 10, u"公交专用车道"),
LabelID("i31", 10, u"机动车车道"),
LabelID("i32", 10, u"非机动车车道"),
LabelID("i33", 10, u"快速公交系统专用车道"),
LabelID("i34", 10, u"多成员车辆专用道"),
LabelID("i35", 10, u"停车位"),
LabelID("i36", 10, u"特殊停车位"),
LabelID("w1", 11, u"傍山险路"),
LabelID("w2", 11, u"村庄"),
LabelID("w3", 11, u"堤坝路"),
LabelID("w4", 11, u"注意分离式道路"),
LabelID("w5", 11, u"渡口"),
LabelID("w6", 11, u"两侧变窄"),
LabelID("w7", 12, u"左侧变窄"),
LabelID("w8", 13, u"右侧变窄"),
LabelID("w9", 13, u"窄桥"),
LabelID("w10", 13, u"注意落石"),
LabelID("w11", 14, u"反向弯路"),
LabelID("w12", 14, u"过水路面"),
LabelID("w13", 15, u"十字路口"),
LabelID("w14", 16, u"十字交叉路口"),
LabelID("w15", 17, u"Y形交叉路口"),
LabelID("w16", 18, u"T形交叉路口"),
LabelID("w17", 18, u"环形交叉路口"),
LabelID("w18", 18, u"连续弯路"),
LabelID("w19", 18, u"连续下坡"),
LabelID("w20", 18, u"路面不平"),
LabelID("w21", 18, u"注意雨雪天气"),
LabelID("w22", 18, u"路面低洼"),
LabelID("w23", 18, u"路面高突"),
LabelID("w24", 19, u"慢行"),
LabelID("w25", 20, u"上陡坡"),
LabelID("w26", 20, u"下陡坡"),
LabelID("w27", 21, u"施工"),
LabelID("w28", 22, u"事故易发路段"),
LabelID("w29", 23, u"双向交通"),
LabelID("w30", 23, u"注意野生动物"),
LabelID("w31", 24, u"隧道"),
LabelID("w32", 24, u"隧道开车灯"),
LabelID("w33", 24, u"驼峰桥"),
LabelID("w34", 24, u"无人看守铁路道口"),
LabelID("w35", 24, u"有人看守铁道路口"),
LabelID("w36", 24, u"叉形符号"),
LabelID("w37", 24, u"斜杠符号"),
LabelID("w38", 25, u"向右急弯路"),
LabelID("w39", 26, u"向左急弯路"),
LabelID("w40", 26, u"易滑"),
LabelID("w41", 26, u"注意信号灯"),
LabelID("w42", 26, u"注意障碍物左侧通行"),
LabelID("w43", 27, u"注意障碍物两侧通行"),
LabelID("w44", 27, u"注意障碍物右侧通行"),
LabelID("w45", 27, u"注意保持车距"),
LabelID("w46", 27, u"注意不利气象条件"),
LabelID("w47", 27, u"注意残疾人"),
LabelID("w48", 27, u"注意潮汐车道"),
LabelID("w49", 27, u"注意雾天"),
LabelID("w50", 28, u"注意儿童"),
LabelID("w51", 29, u"注意行人"),
LabelID("w52", 29, u"注意非机动车"),
LabelID("w53", 30, u"注意左侧合流"),
LabelID("w54", 31, u"注意右侧合流"),
LabelID("w55", 31, u"注意横向风"),
LabelID("w56", 31, u"注意路面结冰"),
LabelID("w57", 32, u"注意危险"),
LabelID("w58", 32, u"注意牲畜"),
LabelID("w59", 32, u"注意前方车辆排队"),
LabelID("w60", 33, u"建议速度"),
LabelID("w61", 33, u"避险车道"),
LabelID("p1", 34, u"禁止超车"),
LabelID("p2", 34, u"解除禁止超车"),
LabelID("p3", 35, u"禁止畜力车驶入"),
LabelID("p4", 36, u"禁止大型客车驶入"),
LabelID("p5", 37, u"禁止电动三轮车驶入"),
LabelID("p6", 37, u"禁止掉头"),
LabelID("p7", 38, u"禁止非机动车驶入"),
LabelID("p8", 39, u"禁止载货汽车左转"),
LabelID("p9", 40, u"禁止挂车半挂车驶入"),
LabelID("p10", 41, u"禁止行人进入"),
LabelID("p11", 42, u"禁止机动车驶入"),
LabelID("p12", 43, u"禁止鸣喇叭"),
LabelID("p13", 44, u"禁止摩托车驶入"),
LabelID("p14", 45, u"禁止某两种车驶入"),
LabelID("p15", 45, u"禁止直行"),
LabelID("p16", 46, u"禁止人力车驶入"),
LabelID("p17", 47, u"禁止人力货运三轮车驶入"),
LabelID("p18", 48, u"禁止人力客运三轮车驶入"),
LabelID("p19", 49, u"禁止三轮汽车、低速货车驶入"),
LabelID("p20", 50, u"禁止右转弯"),
LabelID("p21", 51, u"禁止左右转弯"),
LabelID("p22", 51, u"禁止直行和右转弯"),
LabelID("p23", 52, u"禁止向左转弯"),
LabelID("p24", 53, u"禁止小客车右转"),
LabelID("p25", 53, u"禁止小型客车驶入"),
LabelID("p26", 54, u"禁止载货汽车驶入"),
LabelID("p27", 54, u"禁止运载危险物品车辆驶入"),
LabelID("p28", 54, u"禁止直行或向左转弯"),
LabelID("p29", 55, u"禁止拖拉机驶入"),
LabelID("p30", 56, u"限制轴重"),
LabelID("p31", 57, u"禁止通行"),
LabelID("p32", 57, u"停车检查"),
LabelID("p33", 57, u"海关"),
LabelID("p34", 57, u"会车让行"),
LabelID("p35", 58, u"减速让行"),
LabelID("p36", 59, u"限制高度"),
LabelID("p37", 60, u"限制速度"),
LabelID("p38", 61, u"限制质量"),
LabelID("p39", 61, u"区域禁止停车"),
LabelID("p40", 62, u"禁止驶入"),
LabelID("p41", 63, u"禁止长时间停车"),
LabelID("p42", 64, u"解除限制速度"),
LabelID("p43", 65, u"停车让行"),
LabelID("p44", 65, u"限制宽度"),
LabelID("p45", 66, u"禁止载货货车及拖拉机左转弯"),
LabelID("p46", 67, u"禁止停车"),
LabelID("p47", 67, u"区域限制速度"),
LabelID("p48", 68, u"区域限制速度解除"),
LabelID("p49", 68, u"区域禁止长时间停车"),
}
| null |
sign_classfication/sign_labels.py
|
sign_labels.py
|
py
| 20,962 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.namedtuple",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 164,
"usage_type": "call"
}
] |
347645632
|
# -*- coding: utf-8 -*-
# This file defines the functions for reading and writing profiles.
# Import os for creating directories.
import os
# Import glob for getting a list of the profiles.
import glob
# Import time for formatting times.
import time
# Import json for saving the configuration file.
import json
# Import pickle for loading and saving the data.
try:
import cPickle as pickle
except ImportError:
import pickle
def write_profile(main_dir = "", name = "", filename = "", data = []):
"""Writes the data to the profile file."""
# Get the filename.
filename = filename if filename != "" else "%s/profiles/%s/weather" % (main_dir, name)
try:
        data_file = open(filename, "wb")
pickle.dump(data, data_file)
data_file.close()
return True
except IOError:
print("Error saving data file (IOError).")
return False
except (TypeError, ValueError):
print("Error saving data file (TypeError or ValueError).")
return False
def read_profile(main_dir = "", name = "", filename = ""):
"""Reads the data from the profile file."""
# Get the filename.
filename = filename if filename != "" else "%s/profiles/%s/weather" % (main_dir, name)
try:
        data_file = open(filename, "rb")
data = pickle.load(data_file)
data_file.close()
except IOError:
print("Error importing data (IOError).")
data = []
except (TypeError, ValueError):
print("Error importing data (TypeError or ValueError).")
data = []
return data
def write_blank_profile(main_dir, name):
"""Writes a blank profile file."""
os.makedirs("%s/profiles/%s" % (main_dir, name))
    new_prof_file = open("%s/profiles/%s/weather" % (main_dir, name), "wb")
pickle.dump([], new_prof_file)
new_prof_file.close()
def write_standard_file(filename, data):
"""Writes a file without formatting it as JSON."""
try:
data_file = open(filename, "w")
data_file.write(data)
data_file.close()
except IOError:
print("Error saving data file (IOError).")
def get_profile_list(main_dir, last_profile):
"""Gets the list of profiles."""
    # Remember the current directory and switch to where the profiles are stored.
current_dir = os.getcwd()
os.chdir("%s/profiles" % main_dir)
# Get the list of profiles, remove the current profile, and sort the list.
profiles = glob.glob("*")
profiles = list(set(profiles) - set([last_profile]))
profiles.sort()
# Get the creation and last modified dates.
for i in range(0, len(profiles)):
# Get the dates.
creation, modified = get_metadata(main_dir, profiles[i])
profiles[i] = [profiles[i], creation, modified]
# Switch back to the previous directory.
os.chdir(current_dir)
return profiles
def get_metadata(main_dir, last_profile):
"""Gets the current metadata."""
try:
meta_file = open("%s/profiles/%s/metadata" % (main_dir, last_profile), "r")
creation = meta_file.readline().strip()
modified = meta_file.readline().strip()
meta_file.close()
except IOError:
print("Error reading metadata file (IOError).")
creation = "Error"
modified = "Error"
return creation, modified
def write_metadata(main_dir, last_profile, creation, modified):
"""Writes the metadata file."""
try:
meta_file = open("%s/profiles/%s/metadata" % (main_dir, last_profile), "w")
meta_file.write("%s\n%s" % (creation, modified))
meta_file.close()
except IOError:
print("Error saving metadata file (IOError).")
def write_config(conf_dir, config):
"""Saves the configuration."""
try:
config_file = open("%s/config" % conf_dir, "w")
json.dump(config, config_file)
config_file.close()
except IOError:
print("Error saving configuration file (IOError).")
except (TypeError, ValueError):
print("Error saving configuration file (TypeError or ValueError).")
def write_last_profile(conf_dir, last_profile):
"""Saves the last profile."""
try:
prof_file = open("%s/lastprofile" % conf_dir, "w")
prof_file.write(last_profile)
prof_file.close()
except IOError:
print("Error saving profile file (IOError).")
| null |
weatherlog_resources/io.py
|
io.py
|
py
| 4,526 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pickle.dump",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 145,
"usage_type": "call"
}
] |
80049191
|
import torch
import torch.optim as optim
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch.utils.data.dataloader import DataLoader
from torchvision import datasets
from tqdm import tqdm
from capsnet import CapsNet
from functions import DigitMarginLoss
from functions import accuracy
batch_size = 32
train_loader = DataLoader(datasets.MNIST('data', train=True, download=False, transform=transforms.Compose([
# transforms.RandomShift(2),
transforms.ToTensor()])), batch_size=batch_size, shuffle=True)
test_loader = DataLoader(datasets.MNIST('data', train=False, transform=transforms.Compose([
transforms.ToTensor()])), batch_size=batch_size)
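# Build the CapsNet without the reconstruction decoder; training below therefore uses only the margin loss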
model = CapsNet(with_reconstruction=False)
optimizer = optim.Adam(model.parameters(), weight_decay=1e-4)
margin_loss = DigitMarginLoss()
reconstruction_loss = torch.nn.MSELoss(size_average=False)
if torch.cuda.is_available():
model.cuda()
margin_loss.cuda()
reconstruction_loss.cuda()
model.train()
for epoch in range(1, 11):
epoch_tot_loss = 0
epoch_tot_acc = 0
bar = tqdm(train_loader, total=len(train_loader), initial=1)
for data, target in bar:
if torch.cuda.is_available():
data, target = data.cuda(), target.cuda()
data = Variable(data)
target = Variable(target)
if model.with_reconstruction:
digit_caps, reconstruction = model(data, target)
loss = margin_loss(digit_caps, target) + 0.0005 * reconstruction_loss(reconstruction, data.view(-1))
else:
digit_caps = model(data, target)
loss = margin_loss(digit_caps, target)
epoch_tot_loss += loss.data[0]
optimizer.zero_grad()
loss.backward(retain_graph=True)
optimizer.step()
acc = accuracy(digit_caps, target)
epoch_tot_acc += acc
bar.set_description("epoch: {} [ loss: {:.4f} ] [ acc: {:.2f}% ]".format(epoch, epoch_tot_loss / batch_size,
100 * (epoch_tot_acc / batch_size)))
| null |
main.py
|
main.py
|
py
| 2,090 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.utils.data.dataloader.DataLoader",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets.MNIST",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.dataloader.DataLoader",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets.MNIST",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "capsnet.CapsNet",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "functions.DigitMarginLoss",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "functions.accuracy",
"line_number": 53,
"usage_type": "call"
}
] |
629136088
|
# -*- coding: UTF-8 -*-
"""
Author : Ken-Kei
Create Date : 2016/07/01
"""
import configparser
import time
import os
# ====================================================================
config = configparser.ConfigParser()
config.read("E:\PythonWorkspace\Automation\JiuFu-Automation\config.ini")
# ====================================================================
# ====================================================================
"""指定驱动文件位置"""
ie_driver_path = os.path.join("./Driver/IEDriverServer.exe")
# ====================================================================
# ====================================================================
"""Config.ini属性配置"""
# [url_info]
browser = config.get("url_info", "browser") # 选择浏览器
main_url = config.get("url_info", "main_url") # 主页url
waiting_time = config.getint("url_info", "waiting_time") # 页面等待时间
operation_system = config.get("url_info", "operation_system") # 测试使用的操作系统
# [driver_path_info]
chrome_driver_path = config.get("driver_path_info", "chrome_driver_path") # chrome driver文件路径
# [account_info]
account_row = config.getint("account_info", "account_row") # 读取到excel表的账号行数
username = config.get("account_info", "username") # 后台使用的账号
password = config.get("account_info", "password") # 后台使用的密码
wrong_username = config.get("account_info", "wrong_username") # 错误的用户名
wrong_password = config.get("account_info", "wrong_password") # 错误的密码
# [test_result]
data_source = config.get("test_result", "data_source") # 测试数据数据源
need_screenshot = config.get("test_result", "need_screenshot") # 是否需要截图
# [email_info]
smtp_server = config.get("email_info", "smtp_server") # smtp服务器地址
smtp_server_port = config.get("email_info", "smtp_server_port") # smtp服务器地址端口号
from_email_address = config.get("email_info", "from_email_address") # 发件人账号
from_email_address_pwd = config.get("email_info", "from_email_address_pwd") # 发件人密码
to_mail_address = config.get("email_info", "to_email_address").split(',') # 收件人地址
cc_mail_address = config.get("email_info", "cc_email_address").split(',') # 抄送人地址
# [cron_job_time_setting]
time_set = config.get("cron_job_time_setting", "time_set") # 定时任务触发的时间
# ====================================================================
# ====================================================================
# """初始化读excel操作"""
#
# data_source_path = os.path.join("./DataSource/" + data_source)
# data = xlrd.open_workbook(data_source_path)
# table = data.sheet_by_name('account')
# copy_data = copy(data)
# copy_sheet = copy_data.get_sheet(0)
# ====================================================================
# ====================================================================
"""log文件和截图文件路径变量"""
launch_log_path = os.path.join("./Log/Log_" + time.strftime("%Y%m%d"))
launch_screenshot_path = os.path.join("./Log/Screenshot_" + time.strftime("%Y%m%d") + "/" + time.strftime("%H%M%S"))
launch_result_path = os.path.join("./Result/Result_" + time.strftime("%Y%m%d"))
# ====================================================================
# ====================================================================
"""截图文件名"""
# 登入登出截图
login_failed_screenshot = '登录失败'
login_succeed_screenshot = '登录成功'
logout_succeed_screenshot = '登出成功'
logout_failed_screenshot = '登出失败'
# Create picture-set category
create_pc_succeed_screenshot = '创建套图分类成功'
create_pc_failed_screenshot = '创建套图分类失败'
# Create picture set
create_pk_succeed_screenshot = '创建套图成功'
create_pk_failed_screenshot = '创建套图失败'
# Create card/coupon
create_card_succeed_screenshot = '创建卡券成功'
create_card_failed_screenshot = '创建卡券失败'
card_name_exist_screenshot = '创建卡券失败-卡券名称重复'
# Create micro-help campaign
create_mh_succeed_screenshot = '创建微助力成功'
create_mh_failed_screenshot = '创建微助力失败'
# Create channel QR code
create_code_succeed_screenshot = '创建渠道二维码成功'
create_code_failed_screenshot = '创建渠道二维码失败'
# ====================================================================
# Element locator for gallery upload
# .//*[@id='upImgs']/div/div/div[4]/div/div[2]/div/div[1]/div
# ====================================================================
"""用例名"""
test_LoginSucceed = '登入以及登出工单系统成功'
test_LoginFailedWithWrongUser = '错误的用户名登录'
test_LoginFailedWithWrongPwd = '错误的密码登录'
test_CreatePictureClassify = '创建套图分类'
test_CreatePictureKit = '创建套图'
test_CreateCard = '创建卡券'
test_CreateMicroHelp = '创建微助力活动'
test_CreateQRCode = '创建渠道二维码'
# ====================================================================
# ====================================================================
"""log信息"""
loging_in = "正在使用此用户登录: %s"
# ====================================================================
# ====================================================================
"""指定邮件标题、正文及其它信息"""
# 邮件标题
subject = '自动化用例执行结果'
# Email body
main_body = """
自动化测试用例已经执行完成,以下为测试报告概况,用例的详细执行状况请查看附件。
==============================================
这是自动发送的邮件,请勿答复。
以上
测试组
"""
# ====================================================================
# ====================================================================
"""JS语句"""
# 上传图片的元素id为fileImage
file_image_block = """document.getElementById('fileImage').style.display='block'; """
file_image_none = """document.getElementById('fileImage').style.display='none'; """
remove_sd_read_only = """document.getElementById('js-startDate').readOnly=false; """
remove_ed_read_only = """document.getElementById('js-endDate').readOnly=false; """
# ====================================================================
| null |
attribute.py
|
attribute.py
|
py
| 6,348 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "configparser.ConfigParser",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "time.strftime",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "time.strftime",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "time.strftime",
"line_number": 79,
"usage_type": "call"
}
] |
168310450
|
import discord
from discord.ext import commands
from .utils.dataIO import fileIO
from random import choice as randchoice
import os
class Compliment:
"""Compliment Cog"""
def __init__(self, bot):
self.bot = bot
self.compliments = fileIO("data/compliment/compliment.json", "load")
@commands.command(pass_context=True, no_pm=True, aliases=["cpl"])
async def compliment(self, ctx, user : discord.Member=None):
"""Compliment the user"""
msg = ' '
if user != None:
if user.id == self.bot.user.id:
user = ctx.message.author
msg = [" Hey I appreciate the compliment! :smile:", "No ***YOU'RE*** awesome! :smile:"]
await self.bot.say(user.mention + randchoice(msg))
else:
await self.bot.say(user.mention + msg + randchoice(self.compliments))
else:
await self.bot.say(ctx.message.author.mention + msg + randchoice(self.compliments))
def check_folders():
if not os.path.exists("data/compliment"):
print("Creating data/compliment folder...")
os.makedirs("data/compliment")
def check_files():
if not fileIO("data/compliment/compliment.json", "check"):
print("Creating empty compliment.json...")
fileIO("data/compliment/compliment.json", "save", [])
def setup(bot):
check_folders()
check_files()
n = Compliment(bot)
bot.add_cog(n)
| null |
compliment/compliment.py
|
compliment.py
|
py
| 1,450 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "utils.dataIO.fileIO",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "discord.Member",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "utils.dataIO.fileIO",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "utils.dataIO.fileIO",
"line_number": 43,
"usage_type": "call"
}
] |
630424188
|
import os
import sys
import glob
import nltk
import sqlite3
import unittest
import math
connection = None
class TestStringMethods(unittest.TestCase):
def test_illegalInput(self):
os.system("python3 ./../vs_query.py ./Documents/data.db > output1.txt")
fp = open("./output1.txt")
for line in fp:
first_line = line.split()
break
fp.close()
self.assertEqual(first_line[0],"Error")
def test_query_one(self):
os.system("python3 ./../vs_query.py ./Documents/data.db 4 y one > output2.txt")
fp = open("./output2.txt")
line_list = [ line for line in fp ]
fp.close()
for l in line_list:
if len(l.split()) > 1:
score = float(l.split()[1])
self.assertEqual(score,0.0)
else:
self.assertTrue()
def test_query_two(self):
os.system("python3 ./../vs_query.py ./Documents/data.db 4 y ten NINE y one > output3.txt")
fp = open("./output3.txt")
line_list = [ line for line in fp ]
fp.close()
for l in line_list:
if len(l.split()) > 1:
score = float(l.split()[1])
self.assertEqual(score,0.0)
else:
self.assertTrue(False)
def test_query_three(self):
os.system("python3 ./../vs_query.py ./Documents/data.db 4 y two three > output4.txt")
fp = open("./output4.txt")
line_list = [ line for line in fp ]
fp.close()
for l in line_list:
if len(l.split()) > 1:
score = float(l.split()[1])
doc_id = int(l.split()[0])
if doc_id == 1:
self.assertEqual(score,1.0)
elif doc_id == 2 or doc_id == 4:
self.assertTrue(score<0.707108)
self.assertTrue(score>0.707106)
else:
self.assertEqual(score,0.0)
else:
self.assertTrue(False)
'''
# this function is to execute query
'''
def executeQuery(query):
cur = connection.cursor() #get cursor
return [i for i in cur.execute(query)]
'''
# this function is to connect to database
'''
def connectionDataBase(data_file_name):
global connection
#create a connection to database with file name "data_file_name", if error print it out
try:
connection = sqlite3.connect(data_file_name)
return connection
except Exception as e:
print(e)
exit("Error,unit_test file can not connect database")
'''
# this function is to check if create_index.py exist
# check if Documents contains correct docuemnts
'''
def checkFiles():
if len(sys.argv) != 1:
exit("Error, command line error..")
else:
if not os.path.isfile("./../vs_query.py"):
exit("Error, vs_query.py does not exit..")
try:
for filepath in glob.glob(os.path.join("./Documents", '*.txt')):
if int(filepath.split(".")[-2].split("_")[1]) > 4 or int(filepath.split(".")[-2].split("_")[1]) < 1:
exit("Error, Document not correct")
except Exception:
raise
exit("Error, Documents' files not correct")
print("The documents and python script are correct...\nchecking...")
if __name__ == '__main__':
checkFiles()
connection = connectionDataBase("./Documents/data.db")
unittest.main(verbosity=2)
| null |
assignment1/Part3/Part3_test/vs_query_unit_test.py
|
vs_query_unit_test.py
|
py
| 2,932 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "unittest.main",
"line_number": 116,
"usage_type": "call"
}
] |
209470629
|
#!/usr/bin/env python
import django
import django.dispatch
from django.dispatch import receiver
from django.conf import settings
from django.contrib.admin.models import LogEntry
from DjangoBlog.utils import get_current_site
from django.core.mail import EmailMultiAlternatives
from django.db.models.signals import post_save
from django.contrib.auth.signals import user_logged_in, user_logged_out, user_login_failed
from DjangoBlog.utils import cache, send_email, expire_view_cache, delete_sidebar_cache, delete_view_cache
from DjangoBlog.spider_notify import SpiderNotify
from oauth.models import OAuthUser
from blog.models import Article, Category, Tag, Links, SideBar, BlogSettings
from comments.models import Comment
from comments.utils import send_comment_email
import _thread
import logging
import os
from DjangoBlog.settings import EMAIL_FILES
from email.mime.image import MIMEImage
logger = logging.getLogger(__name__)
oauth_user_login_signal = django.dispatch.Signal(providing_args=['id'])
send_email_signal = django.dispatch.Signal(providing_args=['emailto', 'title', 'content'])
@receiver(send_email_signal)
def send_email_signal_handler(sender, **kwargs):
emailto = kwargs['emailto']
title = kwargs['title']
content = kwargs['content']
images = kwargs['images']
msg = EmailMultiAlternatives(title, content, from_email=settings.DEFAULT_FROM_EMAIL, to=emailto)
msg.content_subtype = "html"
msg.mixed_subtype = 'related'
if images is not None:
for key, value in images.items():
full_path = os.path.join(EMAIL_FILES, key)
if os.path.isfile(full_path):
img_data = open(full_path, 'rb').read()
img = MIMEImage(img_data, value)
img.add_header('Content-Id', key)
img.add_header("Content-Disposition", "inline", filename=key)
msg.attach(img)
from servermanager.models import EmailSendLog
log = EmailSendLog()
log.title = title
log.content = content
log.emailto = ','.join(emailto)
try:
result = msg.send()
log.send_result = result > 0
except Exception as e:
logger.error(e)
log.send_result = False
log.save()
@receiver(oauth_user_login_signal)
def oauth_user_login_signal_handler(sender, **kwargs):
id = kwargs['id']
oauthuser = OAuthUser.objects.get(id=id)
site = get_current_site().domain
if oauthuser.picture and not oauthuser.matedata.find(site) >= 0:
from DjangoBlog.utils import save_user_avatar
oauthuser.picture = save_user_avatar(oauthuser.picture)
oauthuser.save()
delete_sidebar_cache(oauthuser.author.username)
cache.clear()
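# Invalidate cached pages and sidebars whenever a content model is saved; comment saves also trigger e-mail notification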
@receiver(post_save)
def model_post_save_callback(sender, instance, created, raw, using, update_fields, **kwargs):
clearcache = False
if isinstance(instance, LogEntry):
return
if 'get_full_url' in dir(instance):
is_update_views = update_fields == {'views'}
if not is_update_views:
clearcache = True
if isinstance(instance, Comment):
path = instance.article.get_absolute_url()
site = get_current_site().domain
if site.find(':') > 0:
site = site[0:site.find(':')]
expire_view_cache(path, servername=site, serverport=80, key_prefix='blogdetail')
if cache.get('seo_processor'):
cache.delete('seo_processor')
comment_cache_key = 'article_comments_{id}'.format(id=instance.article.id)
cache.delete(comment_cache_key)
delete_sidebar_cache(instance.author.username)
delete_view_cache('article_comments', [str(instance.article.pk)])
_thread.start_new(send_comment_email, (instance,))
if clearcache:
cache.clear()
@receiver(user_logged_in)
@receiver(user_logged_out)
def user_auth_callback(sender, request, user, **kwargs):
if user and user.username:
logger.info(user)
delete_sidebar_cache(user.username)
cache.clear()
| null |
DjangoBlog/blog_signals.py
|
blog_signals.py
|
py
| 4,019 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.dispatch.Signal",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.dispatch",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "django.dispatch.Signal",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.dispatch",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "django.core.mail.EmailMultiAlternatives",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_FROM_EMAIL",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "DjangoBlog.settings.EMAIL_FILES",
"line_number": 43,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "email.mime.image.MIMEImage",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "servermanager.models.EmailSendLog",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "django.dispatch.receiver",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "oauth.models.OAuthUser.objects.get",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "oauth.models.OAuthUser.objects",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "oauth.models.OAuthUser",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "DjangoBlog.utils.get_current_site",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "DjangoBlog.utils.save_user_avatar",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "DjangoBlog.utils.delete_sidebar_cache",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "DjangoBlog.utils.cache.clear",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "DjangoBlog.utils.cache",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "django.dispatch.receiver",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.models.LogEntry",
"line_number": 83,
"usage_type": "argument"
},
{
"api_name": "comments.models.Comment",
"line_number": 89,
"usage_type": "argument"
},
{
"api_name": "DjangoBlog.utils.get_current_site",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "DjangoBlog.utils.expire_view_cache",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "DjangoBlog.utils.cache.get",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "DjangoBlog.utils.cache",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "DjangoBlog.utils.cache.delete",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "DjangoBlog.utils.cache",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "DjangoBlog.utils.cache.delete",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "DjangoBlog.utils.cache",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "DjangoBlog.utils.delete_sidebar_cache",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "DjangoBlog.utils.delete_view_cache",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "_thread.start_new",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "comments.utils.send_comment_email",
"line_number": 104,
"usage_type": "argument"
},
{
"api_name": "DjangoBlog.utils.cache.clear",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "DjangoBlog.utils.cache",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "django.dispatch.receiver",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.post_save",
"line_number": 80,
"usage_type": "argument"
},
{
"api_name": "DjangoBlog.utils.delete_sidebar_cache",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "DjangoBlog.utils.cache.clear",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "DjangoBlog.utils.cache",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "django.dispatch.receiver",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.signals.user_logged_in",
"line_number": 110,
"usage_type": "argument"
},
{
"api_name": "django.dispatch.receiver",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.signals.user_logged_out",
"line_number": 111,
"usage_type": "argument"
}
] |
592865904
|
from __future__ import with_statement
from sqlite3 import dbapi2 as sqlite3
from contextlib import closing
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
import models
# configuration settings
DATABASE = 'pom.db'
DEBUG = True
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
app = Flask(__name__)
app.config.from_object(__name__)
def connect_db():
return sqlite3.connect(app.config['DATABASE'])
def init_db():
with closing(connect_db()) as db:
with app.open_resource('schema.sql') as f:
db.cursor().executescript(f.read())
db.commit()
@app.before_request
def before_request():
g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
if hasattr(g, 'db'):
g.db.close()
@app.route('/')
def splash():
return render_template('splash.html')
@app.route('/plan')
def plan_main():
tasks = models.Task()
all_tasks = tasks.all_tasks()
return render_template('plan.html', all_tasks=all_tasks)
@app.route('/add')
def add_form():
return render_template('add_task.html')
@app.route('/add_task', methods=['GET', 'POST'])
def add_task():
if not session.get('logged_in'):
abort(401)
tasks = models.Task()
tasks.insert_task(request.form['title'], request.form['note'],
request.form['month'], request.form['day'],
request.form['year'], request.form['priority'])
flash('New entry was successfully posted')
return redirect(url_for('plan_main'))
@app.route('/delete/<val>')
def del_task(val):
tasks = models.Task()
tasks.del_task(val)
flash('deleted')
return redirect(url_for('plan_main'))
@app.route('/edit/<val>')
def edit_form(val):
task = models.QueryTask(val)
split_date = task.due_date['due_date'].split('-')
year = split_date[0]
month = split_date[1]
day = split_date[2]
    return render_template('edit.html', task=task, year=year,
                           month=month, day=day)
@app.route('/edit_task/<val>', methods=['GET', 'POST'])
def edit_task(val):
tasks = models.Task()
tasks.edit_task(request.form['title'], request.form['note'],
request.form['due_date'], request.form['priority'], val)
flash('Entry was successfully edited')
return redirect(url_for('plan_main'))
@app.route('/done_task/<val>')
def done_task(val):
task = models.Task()
task.done_task(val)
return redirect(url_for('plan_main'))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid Username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid Password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('plan_main'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
    return redirect(url_for('splash'))
if __name__ == '__main__':
app.run()
| null |
main.py
|
main.py
|
py
| 3,301 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sqlite3.dbapi2.connect",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlite3.dbapi2",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "contextlib.closing",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flask.g.db",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "flask.g",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "flask.g.db.close",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "flask.g.db",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "models.Task",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "flask.session.get",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "models.Task",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "models.Task",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "flask.flash",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "models.QueryTask",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "models.Task",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "models.Task",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "flask.session.pop",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 112,
"usage_type": "call"
}
] |
94038226
|
#!/usr/bin/env python3
import requests
import time
import toml
MIDDLEWARE_URL="http://127.0.0.1:8888"
START_INDEX=0
def call_middleware(method,params):
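    # JSON-RPC-style call: the middleware replies with an 'Ok' payload on success; anything else is raised as an error.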
rpc = {'method':method,'params':params}
rsp = requests.post(MIDDLEWARE_URL,json=rpc).json()
if 'Ok' in rsp:
return rsp['Ok']
else:
raise Exception(rsp)
def main():
start_nonce = hex(START_INDEX + 1)
print(f'Loading available logs from {start_nonce}...')
seen = 0
for i in range(START_INDEX,(START_INDEX + 256)):
time.sleep(0.1)
nonce = hex(i + 1)
event_rsp = call_middleware('get-events',{'nonce':nonce})
if event_rsp is None:
break
else:
seen += 1
print(f'event {nonce}: {event_rsp}')
if seen == 0:
print('Unable to acquire any logs at this time...')
if __name__ == '__main__':
main()
| null |
example/example-logs.py
|
example-logs.py
|
py
| 886 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.post",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 26,
"usage_type": "call"
}
] |
187251734
|
import numpy as np
import pandas as pd
from keras import models, layers, Input
import matplotlib.pyplot as plt
import keras
################################################ Data process
#################### standardization
path = "./features.xlsx"
out = pd.read_excel(path,index_col=0)
Max_out = np.max(out.values)
Min_out = np.min(out.values)
range_out = Max_out - Min_out
out_standard = out / range_out
print(Max_out,Min_out)
#################### hyperparameter
intervals = out.shape[0] # 6640 days
pre = 12 # 12intervals
begin = 13 # start from 14th
interval_batch=35 # batch_size
n = 118 # number of stations
#################### samples and labels
labels = []
samples_lstm_recent = []
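# Sliding-window samples: each label is the value at interval i, each LSTM sample is the preceding 'pre' intervals.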
for i in range(begin*interval_batch,intervals):
# labels
label = out_standard['坪洲'].values[i]
# lstm
sample_lstm_recent = out_standard['坪洲'].values[i-pre:i]
samples_lstm_recent.append(sample_lstm_recent)
labels.append(label)
num_samples = len(labels)
samples_lstm_recent = np.reshape(np.array(samples_lstm_recent),(num_samples,pre,1))
labels = np.array(labels)
print(samples_lstm_recent.shape)
print(labels.shape)
#################### train and test split
x_train_lstm_recent = samples_lstm_recent[:4000]
x_test_lstm_recent = samples_lstm_recent[4000:]
y_train = labels[:4000]
y_test = labels[4000:]
print(x_train_lstm_recent.shape,y_train.shape)
print(x_test_lstm_recent.shape,y_test.shape)
################################################ Model: GRU
input110 = Input(shape=(pre,1), dtype='float')
input111 = layers.GRU(35,return_sequences=True)(input110)
input112 = layers.GRU(pre)(input111)
output11 = layers.Dense(1,activation='relu')(input112)
model = models.Model(inputs=[input110],outputs=[output11])
model.summary()
model.compile(optimizer='rmsprop',loss='mae',metrics=['mae','mse','mape'])
callbacks_list = [
keras.callbacks.EarlyStopping(
monitor='mae',
patience=10,),
keras.callbacks.ModelCheckpoint(filepath='GRU.h5',
monitor='val_loss',
save_best_only=True,)
]
################################################ Training
epochs = 1000
H = model.fit([x_train_lstm_recent], y_train,callbacks=callbacks_list,batch_size=interval_batch,epochs=epochs,validation_data=([x_test_lstm_recent],y_test))
################################################ Loss
train_loss = H.history['loss']
test_loss = H.history['val_loss']
iterations = [i for i in range(epochs)]
plt.plot(iterations, train_loss,'b-',label='train_mae')
plt.plot(iterations, test_loss,'r-',label='test_mae')
plt.legend()
plt.title('Train_mae VS Test_mae')
################################################ Predict
path="./GRU.h5"
model_best = models.load_model(path)
model_best.summary()
predict_best = model_best.predict(x_test_lstm_recent)
| null |
baselines/GRU.py
|
GRU.py
|
py
| 2,904 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_excel",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "keras.Input",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "keras.layers.GRU",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "keras.layers.GRU",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "keras.layers.Dense",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "keras.models.Model",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "keras.models",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "keras.callbacks.EarlyStopping",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "keras.callbacks",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "keras.callbacks.ModelCheckpoint",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "keras.callbacks",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "keras.models.load_model",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "keras.models",
"line_number": 83,
"usage_type": "name"
}
] |
191282519
|
"""
This is an implementation of a simple neural network that makes use of PyTorch's autograd
to do backprop. We have a single-hidden-layer network that should be able to fit the XOR function.
This example is given in deeplearningbook.org
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class SimpleDNNModel(nn.Module):
"""
Simple DNN Model to showcase how autograd works.
"""
def __init__(self):
super(SimpleDNNModel, self).__init__()
self.fc1 = nn.Linear(2, 2, bias=True)
self.fc2 = nn.Linear(2, 1, bias=True)
# Custom weight initialization
# If we don't do this, the network gets stuck at non-optimal stationary points
# (e.g. all zeros). To keep it simple, we can manually initialize the network
# at a point close to the global optimum so that it goes toward the correct
# stationary point.
with torch.no_grad():
self.fc1.weight = nn.Parameter(data=torch.tensor([
[1.1, 1],
[1, 1.1]
]).float())
self.fc1.bias = nn.Parameter(data=torch.tensor([0, -1]).float())
self.fc2.weight = nn.Parameter(data=torch.tensor([
[1.1, -2.1]
]).float())
self.fc2.bias = nn.Parameter(data=torch.tensor(0).float())
def forward(self, x):
"""
Do a forward computation using the input x
"""
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
net = SimpleDNNModel()
"""
Let's set up our training data so that the network learns the XOR function
Our only goal is to fit the training data perfectly.
(X, y) is the training data.
X contains inputs for the network
y contains true class labels.
"""
X = torch.tensor([[0, 0],
[1, 0],
[0, 1],
[1, 1]]).float()
y = torch.tensor([[0],
[1],
[1],
[0]]).float()
PRINT_DETAILS = False
# Update the weights of the network
import torch.optim as optim
optimizer = optim.SGD(net.parameters(), lr=0.01)
criterion = nn.MSELoss()
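# Training loop: zero the gradients, run a forward pass, compute the MSE loss, backpropagate, and take an SGD step.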
for iteration in range(1000):
optimizer.zero_grad()
output = net(X)
if PRINT_DETAILS:
print("--------- Beginning Iteration {0} --------".format(iteration))
print("fc1.weight =", net.fc1.weight)
print("fc1.bias =", net.fc1.bias)
print("fc2.weight =", net.fc2.weight)
print("fc2.bias =", net.fc2.bias)
print("output =", output)
loss = criterion(output, y)
print("MSE Loss =", loss)
loss.backward()
optimizer.step()
| null |
egs/simple_nnet.py
|
simple_nnet.py
|
py
| 2,582 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.no_grad",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.optim.SGD",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 74,
"usage_type": "name"
}
] |
445395456
|
from discord.ext import commands
import os
import traceback
bot = commands.Bot(command_prefix='/')
token = os.environ['DISCORD_BOT_TOKEN']
@bot.event
async def on_command_error(ctx, error):
orig_error = getattr(error, "original", error)
error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format())
await ctx.send(error_msg)
@bot.event
async def on_ready():
    CHANNEL_ID = 676772811595055136  # any channel ID (int)
    channel = bot.get_channel(CHANNEL_ID)
await channel.send('おはよう!')
@bot.command()
async def ping(ctx):
await ctx.send('norple')
bot.run(token)
| null |
discordbot.py
|
discordbot.py
|
py
| 642 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "discord.ext.commands.Bot",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "traceback.TracebackException.from_exception",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "traceback.TracebackException",
"line_number": 12,
"usage_type": "attribute"
}
] |
301942713
|
from django.shortcuts import render
from .models import Check
from django.contrib import messages
# Create your views here.
def checkup(request):
if request.method == 'POST':
message_2 = request.POST['dry cough']
if request.POST['dry cough'] == "yes":
dry_cough = True
else:
dry_cough = False
if request.POST['tiredness'] == "yes":
tiredness = True
else:
tiredness = False
if request.POST['fever'] == "yes":
fever = True
else:
fever = False
other_symptoms = int(request.POST['other_symptoms'])
if request.POST['chest-pain'] == "yes":
chest_pain = True
else:
chest_pain = False
if request.POST['breathing'] == "yes":
breathing = True
else:
breathing = False
a = Check.objects.create(
cough=dry_cough, fever=fever, Tiredness=tiredness,
chest_pain=chest_pain, breathing_problem=breathing,
other_symptoms=other_symptoms)
if Check.is_severe(a):
a.save()
display = "you need to get tested"
messages.error(request, "You need to get tested")
else:
display = "You seem to be fine"
return render(request, 'covidcheck/checkpage.html', {'display': display, 'message': messages})
else:
return render(request, 'covidcheck/mainpage.html')
| null |
Frosthack/covidcheck/views.py
|
views.py
|
py
| 1,475 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "models.Check.objects.create",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "models.Check.objects",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "models.Check",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "models.Check.is_severe",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "models.Check",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 44,
"usage_type": "call"
}
] |
374410032
|
import pyrebase
import time
config = {
"apiKey": "AIzaSyBd2BQuZexuu5IOUqHebir6-OOxEarTQmg",
"authDomain": "hackthenorth-8a3e2.firebaseapp.com",
"databaseURL": "https://hackthenorth-8a3e2.firebaseio.com",
"storageBucket": "hackthenorth.appspot.com"
}
firebase = pyrebase.initialize_app(config)
auth = firebase.auth()
user = auth.sign_in_with_email_and_password("[email protected]", "esoots1")
db = firebase.database()
# Temporarily replace quote function
def noquote(s):
return s
pyrebase.pyrebase.quote = noquote
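# Monkey-patching pyrebase's quote() passes query parameters (e.g. the order_by keys below) through unescaped,
# since the extra quoting pyrebase applies can break those REST queries.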
def store_detect_info(data, user_id):
data['date'] = time.time()
db.child("users").child(user_id).push(data, user['idToken'])
return True
# for showing demo
def get_detect_info(user_id):
return db.child("users").child(user_id).get(user['idToken']).val()
def get_initial_info(user_id):
response = db.child("users").child(user_id).order_by_child("date").get(user['idToken']).val()
return sorted(response.values(), key=lambda x: -x['date'])[-1]
def get_last_blinking_time(user_id):
try:
response = db.child("users").child(user_id).order_by_child("status").equal_to(1).get(user['idToken']).val()
return sorted(response.values(), key=lambda x: x['date'])[-1]['date']
except:
return time.time()
def get_last_not_existing_time(user_id):
try:
response = db.child("users").child(user_id).order_by_child("valid").equal_to(0).get(user['idToken']).val()
return sorted(response.values(), key=lambda x: x['date'])[-1]['date']
except:
return time.time()
| null |
app/firebase_adapter.py
|
firebase_adapter.py
|
py
| 1,565 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pyrebase.initialize_app",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pyrebase.pyrebase",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 52,
"usage_type": "call"
}
] |
26050957
|
import csv
from flask import Flask, render_template,request,redirect,url_for
import diseaseprediction
app = Flask(__name__)
with open('templates/Testing.csv', newline='') as f:
reader = csv.reader(f)
symptoms = next(reader)
symptoms = symptoms[:len(symptoms)-1]
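# The CSV header row supplies the symptom names; the last column (presumably the prediction target) is dropped.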
@app.route('/', methods=['GET'])
def dropdown():
return render_template('default.html', symptoms=symptoms)
@app.route('/disease_predict', methods=['POST'])
def disease_predict():
selected_symptoms = []
if(request.form['Symptom1']!="") and (request.form['Symptom1'] not in selected_symptoms):
selected_symptoms.append(request.form['Symptom1'])
if(request.form['Symptom2']!="") and (request.form['Symptom2'] not in selected_symptoms):
selected_symptoms.append(request.form['Symptom2'])
if(request.form['Symptom3']!="") and (request.form['Symptom3'] not in selected_symptoms):
selected_symptoms.append(request.form['Symptom3'])
if(request.form['Symptom4']!="") and (request.form['Symptom4'] not in selected_symptoms):
selected_symptoms.append(request.form['Symptom4'])
if(request.form['Symptom5']!="") and (request.form['Symptom5'] not in selected_symptoms):
selected_symptoms.append(request.form['Symptom5'])
disease = diseaseprediction.dosomething(selected_symptoms)
return render_template('disease_predict.html',disease=disease,symptoms=symptoms)
if __name__ == '__main__':
app.run(debug=True)
| null |
app.py
|
app.py
|
py
| 1,508 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "diseaseprediction.dosomething",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 33,
"usage_type": "call"
}
] |
417368490
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import pytest
@pytest.fixture
def driver():
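    # Create one Chrome session per test and close it during teardown.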
    _driver = webdriver.Chrome()
yield _driver
_driver.close()
def test_search_in_python_org(driver):
driver.get("http://www.python.org")
assert "Python" in driver.title
elem = driver.find_element(by=By.NAME, value="q")
elem.send_keys("pytest")
elem.send_keys(Keys.RETURN)
assert "No results found." not in driver.page_source
def test_fail_search_in_python_org(driver):
driver.get("http://www.python.org")
assert "Python" in driver.title
elem = driver.find_element(by=By.NAME, value="q")
elem.send_keys("should_not_exist")
elem.send_keys(Keys.RETURN)
assert "No results found." in driver.page_source
| null |
selenium/selenium_tests.py
|
selenium_tests.py
|
py
| 837 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By.NAME",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.keys.Keys.RETURN",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.keys.Keys",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.NAME",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.keys.Keys.RETURN",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.keys.Keys",
"line_number": 28,
"usage_type": "name"
}
] |
275977471
|
# platform-apollo3blue: Apollo3Blue development platform for platformio.
# Copyright 2019-present NigelB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import isdir, join
from SCons.Script import DefaultEnvironment
env = DefaultEnvironment()
platform = env.PioPlatform()
board = env.BoardConfig()
build_mcu = env.get("BOARD_MCU", board.get("build.mcu", ""))
env.ProcessFlags(board.get("build.framework.arduino.v2.extra_flags"))
FRAMEWORK_DIR = platform.get_package_dir("framework-arduinoapollo3")
assert isdir(FRAMEWORK_DIR)
BASE_CORE_DIR = join(FRAMEWORK_DIR, "cores")
CORE_DIR = join(BASE_CORE_DIR, "arduino")
MBED_DIR = join(FRAMEWORK_DIR, "cores", "mbed-os")
BRIDGE_DIR = join(CORE_DIR, "mbed-bridge")
TARGETS_DIR = join(MBED_DIR, "targets", "TARGET_Ambiq_Micro", "TARGET_Apollo3")
SDK_DIR = join(TARGETS_DIR, "sdk")
CMSIS_DIR = join(SDK_DIR, "CMSIS")
LIBRARY_DIR = join(FRAMEWORK_DIR, "libraries")
VARIANTS_DIR = join(FRAMEWORK_DIR, "variants")
BOARD_VARIANTS_DIR = join(VARIANTS_DIR, board.get("build.framework.arduino.v2.variant").replace("TARGET_", "", 1))
linker_scripts = {
"asb": "0xC000.ld",
"svl": "0x10000.ld",
"jlink": "0x10000.ld",
}
#upload_protocol = board.get("upload.protocol")
upload_protocol = env.subst("$UPLOAD_PROTOCOL")
linker_script = linker_scripts[upload_protocol]
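# J-Link uploads reuse the svl uploader's linker script and tooling, so map the protocol accordingly.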
if upload_protocol == "jlink": upload_protocol = "svl"
TOOLS_DIR = join(FRAMEWORK_DIR, "tools")
EEPROM_LIB_DIR = join(LIBRARY_DIR, "EEPROM", "src")
PDM_LIB_DIR = join(LIBRARY_DIR, "PDM", "src")
RTC_LIB_DIR = join(LIBRARY_DIR, "RTC", "src")
SERVO_LIB_DIR = join(LIBRARY_DIR, "Servo", "src")
SOFTWARESERIAL_LIB_DIR = join(LIBRARY_DIR, "SoftwareSerial", "src")
SPI_LIB_DIR = join(LIBRARY_DIR, "SPI", "src")
WIRE_LIB_DIR = join(LIBRARY_DIR, "Wire", "src")
WDT_LIB_DIR = join(LIBRARY_DIR, "WDT", "src")
BURSTMODE_LIB_DIR = join(LIBRARY_DIR, "BurstMode", "src")
# Set parameters for CheckUploadSize in platformio/builder/tools/pioupload.py
env.Replace(
SIZEPROGREGEXP=r"^(?:\.text)\s+([0-9]+).*",
SIZEDATAREGEXP=r"^(?:\.data|\.bss)\s+([0-9]+).*",
SIZECHECKCMD="$SIZETOOL -A -d $SOURCES",
)
env.Append(
ASFLAGS=[
"-c", "-g", "-MMD",
"-x", "assembler-with-cpp",
],
CFLAGS=[
"-MMD",
"-include", join(BOARD_VARIANTS_DIR, "mbed", "mbed_config.h"),
#"-include", join(CORE_DIR, "sdk", "ArduinoSDK.h"),
"-iprefix{}/".format(BASE_CORE_DIR),
join("@{}".format(BOARD_VARIANTS_DIR), "mbed", ".c-flags"),
join("@{}".format(BOARD_VARIANTS_DIR), "mbed", ".includes"),
join("@{}".format(BOARD_VARIANTS_DIR), "mbed", ".c-symbols"),
"-I%s"%EEPROM_LIB_DIR,
"-I%s"%PDM_LIB_DIR,
"-I%s"%RTC_LIB_DIR,
"-I%s"%SERVO_LIB_DIR,
"-I%s"%SOFTWARESERIAL_LIB_DIR,
"-I%s"%SPI_LIB_DIR,
"-I%s"%WIRE_LIB_DIR,
"-I%s"%WDT_LIB_DIR,
"-I%s"%BURSTMODE_LIB_DIR,
],
CPPFLAGS=[
# "-w", "-x", "c++", "-E", "-CC"
],
CXXFLAGS=[
"-MMD",
"-include", join(BOARD_VARIANTS_DIR, "mbed", "mbed_config.h"),
"-include", join(CORE_DIR, "sdk", "ArduinoSDK.h"),
"-iprefix{}/".format(BASE_CORE_DIR),
join("@{}".format(BOARD_VARIANTS_DIR), "mbed", ".cxx-flags"),
join("@{}".format(BOARD_VARIANTS_DIR), "mbed", ".includes"),
join("@{}".format(BOARD_VARIANTS_DIR), "mbed", ".cxx-symbols"),
"-I%s"%EEPROM_LIB_DIR,
"-I%s"%PDM_LIB_DIR,
"-I%s"%RTC_LIB_DIR,
"-I%s"%SERVO_LIB_DIR,
"-I%s"%SOFTWARESERIAL_LIB_DIR,
"-I%s"%SPI_LIB_DIR,
"-I%s"%WIRE_LIB_DIR,
"-I%s"%WDT_LIB_DIR,
"-I%s"%BURSTMODE_LIB_DIR,
],
CPPDEFINES=[
"MBED_NO_GLOBAL_USING_DIRECTIVE",
("ARDUINO", "10811"),
"ARDUINO_ARCH_APOLLO3",
"ARDUINO_ARCH_MBED",
"MBED_NO_GLOBAL_USING_DIRECTIVE",
"CORDIO_ZERO_COPY_HCI",
] ,
CPPPATH=[
CORE_DIR,
BOARD_VARIANTS_DIR,
BRIDGE_DIR,
join(BRIDGE_DIR, "core-api"),
join(BRIDGE_DIR, "core-api", "api", "deprecated"),
],
LINKFLAGS=[
"-T%s" % join(TOOLS_DIR, "uploaders", upload_protocol, linker_script),
join("@{}".format(BOARD_VARIANTS_DIR), "mbed", ".ld-flags"),
join("@{}".format(BOARD_VARIANTS_DIR), "mbed", ".ld-symbols"),
"-Wl,--whole-archive",
join("{}".format(BOARD_VARIANTS_DIR), "mbed", "libmbed-os.a"),
"-Wl,--no-whole-archive",
"-Wl,-Map=%s" % join("$BUILD_DIR", "program.map"),
# "--specs=nosys.specs",
"--specs=nano.specs",
],
LIBS=["stdc++", "supc++", "libmbed-os.a", "arm_cortexM4lf_math", "m"],
LIBPATH=[
join(BOARD_VARIANTS_DIR, "mbed"),
join(CMSIS_DIR, "ARM", "Lib", "ARM")
],
LIBSOURCE_DIRS=[LIBRARY_DIR]
)
libs = []
libs.append(env.BuildLibrary(
join("$BUILD_DIR", "variant"),
BOARD_VARIANTS_DIR
))
libs.append(env.BuildLibrary(
join("$BUILD_DIR", "mbed_bridge"),
BRIDGE_DIR,
))
libs.append(env.BuildLibrary(
join("$BUILD_DIR", "core-implement"),
join(CORE_DIR, "sdk", "core-implement"),
))
env.Prepend(LIBS=libs)
| null |
builder/frameworks/arduino/core_v2.py
|
core_v2.py
|
py
| 5,668 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "SCons.Script.DefaultEnvironment",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 176,
"usage_type": "call"
}
] |
117358666
|
from django.shortcuts import render
from django.shortcuts import redirect
from todo.forms import TodoForm
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
@login_required()
def todo_view(request):
if request.POST:
form = TodoForm(request.POST)
if form.is_valid():
todo = form.save(commit=False)
todo.Username = request.user
todo.save()
return redirect(reverse('hub.views.hub_view'))
else:
            return render(request, 'Todo.html', {'form': form})
else:
        return render(request, 'Todo.html', {'form': TodoForm()})
| null |
todo/views.py
|
views.py
|
py
| 660 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "todo.forms.TodoForm",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "todo.forms",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "todo.forms.Username",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "todo.forms",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "todo.forms.save",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "todo.forms",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "todo.forms.TodoForm",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "todo.forms.TodoForm",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 7,
"usage_type": "call"
}
] |
59701939
|
"""
Utility functions.
"""
import os
import sqlite3
import csv
def connection(db_filename):
"""
Factory for sqlite3.Connection objects.
"""
connection = sqlite3.connect(db_filename)
connection.row_factory = sqlite3.Row
return connection
def schema():
"""
Return the location of the SQL schema.
"""
return os.path.join(os.path.dirname(__file__), "schema.sql")
def ingest(csvfile, db_filename):
"""
Read a file of quotes and populate the database.
"""
conn = connection(db_filename)
c = conn.cursor()
with open(csvfile, "r", newline="") as quotes:
reader = csv.DictReader(quotes)
for row in reader:
c.execute(
"INSERT INTO quotes (quote, author) VALUES (?, ?)",
(row['quote'], row['author']))
conn.commit()
def init(db_filename):
"""
Create the database schema.
"""
conn = connection(db_filename)
c = conn.cursor()
with open(schema(), "r") as schema_fp:
queries = schema_fp.read()
c.executescript(queries)
conn.commit()
| null |
src/random_quote/util.py
|
util.py
|
py
| 1,169 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sqlite3.connect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sqlite3.Row",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 33,
"usage_type": "call"
}
] |
609550282
|
import pygame as p
import random as r
p.init()  # initialize pygame
s=(800,400)
white=(255,255,255)
sc=p.display.set_mode(s)  # display surface: (width, height)
p.display.set_caption("선생님의 요구")
playing = True
font=p.font.SysFont('malgungothic',20)
sco=0
do=p.image.load("q.png")
do_l=[]
for x in range(20):
do_r= do.get_rect(left=r.randint(0,720), top=r.randint(0,350))#left=x,top=y
do_l.append(do_r)
while playing:
for event in p.event.get():
if event.type == p.QUIT:
playing =False
p.quit()
quit()
if event.type ==p.MOUSEBUTTONDOWN:
for do_r in do_l:
if do_r.collidepoint(event.pos[0],event.pos[1]):
sco= sco + 1
do_l.remove(do_r)
do_r= do.get_rect(left=r.randint(0,720), top=r.randint(0,350))#left=x,top=y
do_l.append(do_r)
sc.fill(white)
for do_r in do_l:
sc.blit(do,do_r)
    t=font.render("점수: {}".format(sco),True,(0,0,0))
sc.blit(t,(310,0))
p.display.flip()
| null |
200617좀슈.py
|
200617좀슈.py
|
py
| 1,175 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pygame.init",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.event.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pygame.MOUSEBUTTONDOWN",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pygame.display.flip",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 45,
"usage_type": "attribute"
}
] |
332029625
|
'''
Vincent Lin
SoftDev pd7
K25 -- Getting More REST
2018-11-14
'''
from flask import Flask, render_template
import urllib.request, json
import ssl
app = Flask(__name__)
@app.route("/")
def index():
context = ssl._create_unverified_context()
x = urllib.request.urlopen("https://dog.ceo/api/breeds/image/random", context = context)
stuff = x.read()
data = json.loads(stuff)
return render_template("index.html", stuff=data["message"])
if __name__ == "__main__":
app.debug = True
app.run()
| null |
25_rest/app.py
|
app.py
|
py
| 511 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ssl._create_unverified_context",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 20,
"usage_type": "call"
}
] |
154757157
|
import numpy as np
import pandas as pd
from simulator import simulate
from sklearn import metrics
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from voting_regressor import VotingRegressor
np.random.seed(123456)
lr = SVR()
data = pd.read_csv('BTC-USD.csv')
data = data.dropna()
data.Date = pd.to_datetime(data.Date)
data.set_index('Date', drop=True, inplace=True)
diffs = (data.Close.diff()/data.Close).values[1:]
diff_len = len(diffs)
base_learners = [('SVR', SVR()),
('LR', LinearRegression()),
('KNN', KNeighborsRegressor())]
lr = VotingRegressor(base_learners)
def create_x_data(lags=1):
diff_data = np.zeros((diff_len, lags))
ma_data = np.zeros((diff_len, lags))
diff_ma = (data.Close.diff()/data.Close).rolling(15).mean().fillna(0).values[1:]
for lag in range(1, lags+1):
this_data = diffs[:-lag]
diff_data[lag:, lag-1] = this_data
this_data = diff_ma[:-lag]
ma_data[lag:, lag-1] = this_data
return np.concatenate((diff_data, ma_data), axis=1)
x_data = create_x_data(lags=20)*100
y_data = diffs*100
# REPRODUCIBILITY
x_data = np.around(x_data, decimals=8)
y_data = np.around(y_data, decimals=8)
# =============================================================================
# WALK FORWARD
# =============================================================================
window = 150
preds = np.zeros(diff_len-window)
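# Walk-forward evaluation: refit on a sliding 150-sample window and predict the next out-of-sample step.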
for i in range(diff_len-window-1):
x_train = x_data[i:i+window, :]
y_train = y_data[i:i+window]
lr.fit(x_train, y_train)
preds[i] = lr.predict(x_data[i+window+1, :].reshape(1, -1))
print('Percentages MSE: %.2f'%metrics.mean_squared_error(y_data[window:], preds))
simulate(data, preds)
| null |
Chapter10/voting.py
|
voting.py
|
py
| 1,872 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.random.seed",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sklearn.svm.SVR",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVR",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors.KNeighborsRegressor",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "voting_regressor.VotingRegressor",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.around",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.around",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.mean_squared_error",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "simulator.simulate",
"line_number": 63,
"usage_type": "call"
}
] |
590050564
|
import maya.OpenMaya as om
import maya.cmds as cmds
# total number of vertices
numVertices = 6
# the total number of polygon faces
numPolygons = 2
# the vert positions
vertices = [ (0,0,0), (1,0,0), (1,1,0), (0,1,0),
(0,0,-1), (0,1,-1)
]
# the vert connections
conns = [ 0,1,2,3,
1,4,5,2,
#4,0,3,5
]
# the uv positions
uv_coords = [ (0,0), (1,0), (1,1), (0,1),
(1,0), (2,0), (2,1), (1,1)
]
# uv connections (which vert to assign uv to)
uv_ids = [ 0,1,2,3,
1,4,5,2
]
# the vert connections
pConnects = om.MIntArray()
for c in conns:
pConnects.append(c)
# the vert count per face
pCounts = om.MIntArray()
for o in range(numPolygons):
pCounts.append(4)
# the vert positions
vertexArray = om.MFloatPointArray()
for i in range(0, len(vertices)):
pt = om.MFloatPoint(vertices[i][0],vertices[i][1], vertices[i][2])
vertexArray.append(pt)
# the uv count per face
uvCounts = om.MIntArray()
for o in range(numPolygons):
uvCounts.append(4)
# create u and v position array
uArray = om.MFloatArray()
vArray = om.MFloatArray()
for i in range(numVertices):
uArray.append(uv_coords[i][0])
vArray.append(uv_coords[i][1])
# uv ids (vert id)
uvIds = om.MIntArray()
for c in uv_ids:
uvIds.append(c)
# create a mesh function set
meshFn = om.MFnMesh()
# create a mesh
#mesh = meshFn.create(numVertices, numPolygons, vertexArray, pCounts, pConnects, uArray, vArray)
mesh = meshFn.create(numVertices, numPolygons, vertexArray, pCounts, pConnects)
# assign the uvs
#meshFn.assignUVs(uvCounts, uvIds)
| null |
petfactory/modelling/mesh/dev/wrap_around_triangle_uv.py
|
wrap_around_triangle_uv.py
|
py
| 1,685 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "maya.OpenMaya.MIntArray",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MIntArray",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFloatPointArray",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFloatPoint",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MIntArray",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFloatArray",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFloatArray",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MIntArray",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnMesh",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 74,
"usage_type": "name"
}
] |
346916658
|
#!/usr/bin/env python
"""
Test the ILCDIRAC job module
"""
from __future__ import print_function
import types
import unittest
from mock import patch, MagicMock as Mock
from DIRAC import S_OK, S_ERROR
from ILCDIRAC.Interfaces.API.NewInterface.Job import Job
from ILCDIRAC.Tests.Utilities.GeneralUtils import assertEqualsImproved, assertInImproved, assertDiracFailsWith, \
assertDiracSucceeds, assertDiracSucceedsWith_equals
__RCSID__ = "$Id$"
MODULE_NAME = 'ILCDIRAC.Interfaces.API.NewInterface.Job'
#pylint: disable=protected-access,too-many-public-methods
class JobTestCase( unittest.TestCase ):
""" Test the ILCDIRAC Job class
"""
def setUp(self):
"""set up the objects"""
import DIRAC
objectLoaderInst = Mock(name="ObjectLoader")
objectLoaderInst.loadObject.return_value = S_OK(lambda: S_OK(['x86_64-slc5-gcc43-opt']))
olMock = Mock(name="ObjectLoaderModule", return_value=objectLoaderInst)
with patch.object(DIRAC.Interfaces.API.Job, 'ObjectLoader', new=olMock):
self.job = Job('')
self.job.check = True
def test_setsysconf(self):
self.job.getDIRACPlatforms = Mock(return_value=S_OK(['myTestPlatform']))
self.job.setPlatform('myTestPlatform')
self.assertFalse(self.job.errorDict)
def test_unimplemented_methods( self ):
self.job.setInputData('')
self.job.setInputSandbox('')
self.job.setOutputData('')
self.job.setOutputSandbox('')
self.job.submit()
assertEqualsImproved( len(self.job.errorDict), 5, self )
expected_failures = [ 'setInputData', 'setInputSandbox', 'setOutputData', 'setOutputSandbox', 'submit' ]
for method in expected_failures:
assertInImproved( method, self.job.errorDict.keys(), self )
def test_ignoreapperrors( self ):
assertDiracSucceeds( self.job.setIgnoreApplicationErrors(), self )
assertEqualsImproved( self.job.workflow.parameters[-1].name, 'IgnoreAppError', self )
def test_checkparams( self ):
app_mock = Mock()
app_mock.appname = 'myCoolTestApp'
app_mock._analyseJob.return_value = S_OK('')
app_mock._checkWorkflowConsistency.return_value = S_OK('')
app_mock._addParametersToStep.return_value = S_OK('')
app_mock._setStepParametersValues.return_value = S_OK('')
app_mock._resolveLinkedStepParameters.return_value = S_OK('')
dirac_mock = Mock()
dirac_mock.checkparams.return_value = S_OK('dirac_test_retval')
self.job.applicationlist = [ app_mock ]
assertDiracSucceedsWith_equals( self.job.checkparams( dirac_mock ), 'dirac_test_retval', self )
assertEqualsImproved( self.job.workflow.parameters[-1].name, 'TotalSteps', self )
def test_checkparams_nodirac( self ):
assertDiracFailsWith( self.job.checkparams(), 'missing dirac', self )
def test_checkparams_analyse_fails( self ):
app_mock = Mock()
app_mock.appname = 'myCoolTestApp'
app_mock._analyseJob.return_value = S_ERROR('analyse_test_Err')
dirac_mock = Mock()
dirac_mock.checkparams.return_value = S_OK('dirac_test_retval')
self.job.applicationlist = [ app_mock ]
assertDiracFailsWith( self.job.checkparams( dirac_mock ), 'analyse_test_err', self )
def test_checkparams_consistencycheck_fails( self ):
app_mock = Mock()
app_mock.appname = 'myCoolTestApp'
app_mock._analyseJob.return_value = S_OK('')
app_mock._checkWorkflowConsistency.return_value = S_ERROR('consistency_check_fails')
dirac_mock = Mock()
dirac_mock.checkparams.return_value = S_OK('dirac_test_retval')
self.job.applicationlist = [ app_mock ]
assertDiracFailsWith( self.job.checkparams( dirac_mock ), 'failed to check its consistency', self )
def test_checkparams_jobspecificmodules_fails( self ):
app_mock = Mock()
app_mock.appname = 'myCoolTestApp'
app_mock._analyseJob.return_value = S_OK('')
app_mock._checkWorkflowConsistency.return_value = S_OK('')
app_mock._addParametersToStep.side_effect = IOError('dont call me')
app_mock._userjobmodules.return_value = S_ERROR('test_err')
dirac_mock = Mock()
dirac_mock.checkparams.return_value = S_OK('dirac_test_retval')
self.job.applicationlist = [ app_mock ]
assertDiracFailsWith( self.job.checkparams( dirac_mock ), 'failed to add module', self )
def test_checkparams_addparam_fails( self ):
app_mock = Mock()
app_mock.appname = 'myCoolTestApp'
app_mock._analyseJob.return_value = S_OK('')
app_mock._checkWorkflowConsistency.return_value = S_OK('')
app_mock._addParametersToStep.return_value = S_ERROR('adding of parameters failed')
dirac_mock = Mock()
dirac_mock.checkparams.return_value = S_OK('dirac_test_retval')
self.job.applicationlist = [ app_mock ]
assertDiracFailsWith( self.job.checkparams( dirac_mock ), 'failed to add parameters', self )
def test_checkparams_resolveparams_fails( self ):
app_mock = Mock()
app_mock.appname = 'myCoolTestApp'
app_mock._analyseJob.return_value = S_OK('')
app_mock._checkWorkflowConsistency.return_value = S_OK('')
app_mock._addParametersToStep.return_value = S_OK('')
app_mock._setStepParametersValues.return_value = S_ERROR('resolving_failed_testerr')
dirac_mock = Mock()
dirac_mock.checkparams.return_value = S_OK('dirac_test_retval')
self.job.applicationlist = [ app_mock ]
assertDiracFailsWith( self.job.checkparams( dirac_mock ), 'failed to resolve parameters value', self )
def test_checkparams_resolvelinks_fails( self ):
app_mock = Mock()
app_mock.appname = 'myCoolTestApp'
app_mock._analyseJob.return_value = S_OK('')
app_mock._checkWorkflowConsistency.return_value = S_OK('')
app_mock._addParametersToStep.return_value = S_OK('')
app_mock._setStepParametersValues.return_value = S_OK('')
app_mock._resolveLinkedStepParameters.return_value = S_ERROR('resolve_links_failed')
dirac_mock = Mock()
dirac_mock.checkparams.return_value = S_OK('dirac_test_retval')
self.job.applicationlist = [ app_mock ]
assertDiracFailsWith( self.job.checkparams( dirac_mock ), 'failed to resolve linked parameters', self )
def test_askuser( self ):
with patch('%s.promptUser' % MODULE_NAME, new=Mock(return_value=S_OK(''))):
self.job.applicationlist = [ Mock() ]
assertDiracSucceeds( self.job._askUser(), self )
def test_askuser_nocheck( self ):
with patch('%s.promptUser' % MODULE_NAME, new=Mock(return_value=S_ERROR(''))):
self.job.check = False
assertDiracSucceeds( self.job._askUser(), self )
def test_askuser_novalidation( self ):
self.job.applicationlist = []
with patch('%s.promptUser' % MODULE_NAME, new=Mock(return_value=S_ERROR(''))):
assertDiracFailsWith( self.job._askUser(), 'user did not validate', self )
def test_askuser_validation_denied( self ):
self.job.applicationlist = [ Mock(), Mock() ]
with patch('%s.promptUser' % MODULE_NAME, new=Mock(return_value=S_OK('n'))):
assertDiracFailsWith( self.job._askUser(), 'user did not validate', self )
def test_append( self ):
app_mock = Mock()
app_mock._analyseJob.return_value = S_OK('')
app_mock._checkConsistency.return_value = S_OK('')
app_mock._checkFinalConsistency.return_value = S_OK('')
assertDiracSucceeds( self.job.append( app_mock ), self )
def test_append_analysis_fails( self ):
app_mock = Mock()
app_mock._analyseJob.return_value = S_ERROR('analysis failed, sorry. this is a test')
assertDiracFailsWith( self.job.append( app_mock ), 'analysis failed, sorry', self )
def test_append_checkconsistency_fails( self ):
app_mock = Mock()
app_mock._analyseJob.return_value = S_OK('')
app_mock._checkConsistency.return_value = S_ERROR('consistency check test failed')
assertDiracFailsWith( self.job.append( app_mock ), 'failed to check its consistency', self )
def test_append_finalconsistency_fails( self ):
app_mock = Mock()
app_mock._analyseJob.return_value = S_OK('')
app_mock._checkConsistency.return_value = S_OK('')
app_mock._checkFinalConsistency.return_value = S_ERROR('final consistency invalid')
assertDiracFailsWith( self.job.append( app_mock ), 'failed to check its consistency', self )
def test_append_jobspecificenergy_wrong( self ):
self.job.energy = 2451
app_mock = Mock()
app_mock.energy = 214
app_mock._analyseJob.return_value = S_OK('')
app_mock._checkConsistency.return_value = S_OK('')
app_mock._checkFinalConsistency.side_effect = IOError('dont call me')
assertDiracFailsWith( self.job.append( app_mock ), 'failed job specific checks', self )
def test_append_other_case( self ):
app_mock = Mock()
app_mock.numberOfEvents = 0
app_mock.appname = ''
app_mock.inputSB = [ 'inputsandbox1TestMe', 'other_sandbox', '' ]
self.job.inputsandbox = [ 'other_sandbox' ]
app_mock._analyseJob.return_value = S_OK('')
app_mock._checkConsistency.return_value = S_OK('')
app_mock._checkFinalConsistency.return_value = S_OK('')
assertDiracSucceeds( self.job.append( app_mock ), self )
assertEqualsImproved( self.job.inputsandbox, [ 'other_sandbox', 'inputsandbox1TestMe', '' ], self )
def test_addsoftware( self ):
param_mock = Mock()
param_mock.getValue.return_value = 'myapp;myappnamtest.testv3/2'
with patch.object(self.job.workflow, 'findParameter', new=Mock(return_value=param_mock)):
self.job._addSoftware( 'myAppNamTest', 'testv3/2' )
assertEqualsImproved( self.job.workflow.parameters[-1].name, 'SoftwarePackages', self )
def test_addsoftware_addApp( self ):
param_mock = Mock()
param_mock.getValue.return_value = 'notMyApp'
with patch.object(self.job.workflow, 'findParameter', new=Mock(return_value=param_mock)):
self.job._addSoftware( 'myAppNamTest', 'testv3/2' )
assertEqualsImproved( self.job.workflow.parameters[-1].name, 'SoftwarePackages', self )
#pylint: disable=protected-access
class InternalJobTestCase( unittest.TestCase ):
""" Test the methods of the Job class that require a IntrospectJob instance
"""
def setUp(self):
"""set up the objects"""
import DIRAC
with patch.object(DIRAC.ConfigurationSystem.Client.Helpers.Resources, 'getDIRACPlatforms', return_value=S_OK(['x86_64-slc5-gcc43-opt'])):
self.job = IntrospectJob( '' )
self.job.check = True
def test_checkargs_1( self ):
self.job.indirection_for_checkArgs( 246, types.IntType )
self.assertFalse( self.job.errorDict )
def test_checkargs_2( self ):
self.job.indirection_for_checkArgs( 'bla', types.IntType )
self.assertTrue( self.job.errorDict )
def test_checkargs_3( self ):
self.job.indirection_for_checkArgs( { True : 'blabla' }, types.DictType )
self.assertFalse( self.job.errorDict )
def test_checkargs_4( self ):
self.job.indirection_for_checkArgs( False, types.DictType )
self.assertTrue( self.job.errorDict )
def test_checkargs_5( self ):
self.job.indirection_for_checkArgs( True, types.BooleanType )
self.assertFalse( self.job.errorDict )
def test_checkargs_6( self ):
self.job.indirection_for_checkArgs( {}, types.BooleanType )
self.assertTrue( self.job.errorDict )
def test_checkargs_7( self ):
self.job.indirection_for_checkArgs( [ True, 129, '' ], types.ListType )
self.assertFalse( self.job.errorDict )
def test_checkargs_8( self ):
self.job.indirection_for_checkArgs( 246, types.ListType )
self.assertTrue( self.job.errorDict )
def test_checkargs_9( self ):
self.job.indirection_2_for_checkArgs( 1, types.IntType )
self.assertTrue( self.job.errorDict )
def test_getargsdict( self ):
my_arg_dict = self.job.indirection_for_getargsdict( arg1=1, arg2=True, arg3='mystring' )
assertEqualsImproved( my_arg_dict, { 'arg1' : 1, 'arg2' : True, 'arg3' : 'mystring' }, self )
class IntrospectJob( Job ):
""" Used to easily test the introspective methods (e.g. _checkArgs)
"""
def __init__( self, script = None ):
import DIRAC
objectLoaderInst = Mock(name="ObjectLoader")
objectLoaderInst.loadObject.return_value = S_OK(lambda: S_OK(['x86_64-slc5-gcc43-opt']))
olMock = Mock(name="ObjectLoaderModule", return_value=objectLoaderInst)
with patch.object(DIRAC.Interfaces.API.Job, 'ObjectLoader', new=olMock):
super(IntrospectJob, self).__init__(script)
self.getDIRACPlatforms = Mock(return_value=S_OK(['x86_64-slc5-gcc43-opt']))
def indirection_for_checkArgs( self, arg_to_check, argtype ):
""" Method that uses the _checkArgs method so it can be tested.
"""
self._checkArgs( { 'arg_to_check' : argtype } )
print(arg_to_check)
def indirection_2_for_checkArgs( self, arg_to_check, argtype ):
""" Method that uses the _checkArgs method so it can be tested.
"""
# Intentional 'typo'
self._checkArgs( { 'arg_t_check' : argtype } )
print(arg_to_check)
def indirection_for_getargsdict( self, arg1, arg2, arg3 ):
""" Method that uses the getArgsDict method so it can be tested.
"""
print('%s %s %s' % (arg1, arg2, arg3))
return self._getArgsDict()
| null |
Interfaces/API/NewInterface/Tests/Test_Job.py
|
Test_Job.py
|
py
| 13,070 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "mock.MagicMock",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "mock.patch.object",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "DIRAC.Interfaces",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "ILCDIRAC.Interfaces.API.NewInterface.Job.Job",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertEqualsImproved",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertInImproved",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracSucceeds",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertEqualsImproved",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracSucceedsWith_equals",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertEqualsImproved",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracFailsWith",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracFailsWith",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracFailsWith",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracFailsWith",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracFailsWith",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracFailsWith",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracFailsWith",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracSucceeds",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracSucceeds",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracFailsWith",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracFailsWith",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracSucceeds",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracFailsWith",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracFailsWith",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_ERROR",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracFailsWith",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracFailsWith",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertDiracSucceeds",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertEqualsImproved",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "mock.patch.object",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertEqualsImproved",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "mock.patch.object",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertEqualsImproved",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "mock.patch.object",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "DIRAC.ConfigurationSystem",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "types.IntType",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "types.IntType",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "types.DictType",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "types.DictType",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "types.BooleanType",
"line_number": 247,
"usage_type": "attribute"
},
{
"api_name": "types.BooleanType",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "types.ListType",
"line_number": 255,
"usage_type": "attribute"
},
{
"api_name": "types.ListType",
"line_number": 259,
"usage_type": "attribute"
},
{
"api_name": "types.IntType",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "ILCDIRAC.Tests.Utilities.GeneralUtils.assertEqualsImproved",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "ILCDIRAC.Interfaces.API.NewInterface.Job.Job",
"line_number": 270,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "mock.patch.object",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 279,
"usage_type": "name"
},
{
"api_name": "DIRAC.Interfaces",
"line_number": 279,
"usage_type": "attribute"
},
{
"api_name": "mock.MagicMock",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "DIRAC.S_OK",
"line_number": 281,
"usage_type": "call"
}
] |
528567117
|
import argparse
import glob
import os.path as osp
import pdb
from PIL import Image
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
from tqdm import trange, tqdm
import numpy
import cv2
import matplotlib.pyplot as plt
import time
import shutil
import argparse
mhp_id2label = {0: 'Background',
1: 'head',
2: 'torso',
3: 'u-arms',
4: 'l-arms',
5: 'u-legs',
6: 'l-legs',
}
def get_palette(num_cls):
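    # Fixed 7-colour palette and label-index order for the part labels above; num_cls is currently unused.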
color=[[0, 0, 0],[128, 0, 0],[ 0, 128, 0],[128, 128, 0],[0, 0 , 128],[128 , 0 , 128],[0 , 128, 128]]
inds=[0,4,2,6,1,5,3]
return color,inds
def collect_files(text_dir,
img_dir,
out_dir):
files = []
print("collencting files")
flist = [line.strip() for line in open(text_dir).readlines()]
for add in tqdm(flist, desc='Loading %s ..' % ('val')):
img=osp.join(img_dir,add+'.jpg')
shutil.copy(osp.join(img_dir,add+'.jpg'),osp.join(out_dir,add+'.jpg'))
return None
def parse_args():
parser = argparse.ArgumentParser(
description='Convert mhp annotations to COCO format')
#parser.add_argument('mhp_path', help='mhp data path')
parser.add_argument('--Images', default='images', type=str)
parser.add_argument('--Categoriy-dir', default='Category_ids', type=str)
parser.add_argument('--Human-dir', default='Human_ids', type=str)
parser.add_argument('--Instance-dir', default='parsing_annos', type=str)
#parser.add_argument('-o', '--out-dir', help='output path')
parser.add_argument(
'--nproc', default=1, type=int, help='number of process')
args = parser.parse_args()
return args
def main():
img_dir = 'data/pascal/JPEGImages'
text_dir = 'data/pascal/list/val_id.txt'
human_dir = 'data/pascal/Human_ids/'
    category_dir = 'data/pascal/Categories'
out_dir = 'data/pascal/val/Images'
mmcv.mkdir_or_exist(out_dir)
with mmcv.Timer(
            print_tmpl='It took {}s to convert the MHP annotations'):
files = collect_files(
text_dir,
img_dir,
out_dir)
if __name__ == '__main__':
main()
| null |
sharefeaturekernel_12_pascal/tools/convert_datasets/pascal/copydata.py
|
copydata.py
|
py
| 2,259 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tqdm.tqdm",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "shutil.copy",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "mmcv.mkdir_or_exist",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "mmcv.Timer",
"line_number": 68,
"usage_type": "call"
}
] |
98150315
|
"""
Event API for inter-figure communication.
The event API allows figures to communicate with each other, such that a change
in one figure can trigger a change in another figure. For example, moving the
time cursor in one plot can update the current time in another plot. Another
scenario is two drawing routines drawing into the same window, using events to
stay in-sync.
Authors: Marijn van Vliet <[email protected]>
"""
import contextlib
from dataclasses import dataclass
from weakref import WeakKeyDictionary, WeakSet
import re
from ..utils import warn
# Global dict {fig: channel} containing all currently active event channels.
_event_channels = WeakKeyDictionary()
# The event channels of figures can be linked together. This dict keeps track
# of these links. Links are bi-directional, so if {fig1: fig2} exists, then so
# must {fig2: fig1}.
_event_channel_links = WeakKeyDictionary()
# Event channels that are temporarily disabled by the disable_ui_events context
# manager.
_disabled_event_channels = WeakSet()
# Regex pattern used when converting CamelCase to snake_case.
# Detects all capital letters that are not at the beginning of a word.
_camel_to_snake = re.compile(r"(?<!^)(?=[A-Z])")
# List of events
class UIEvent:
"""Abstract base class for all events.
Attributes
----------
source : matplotlib.figure.Figure | Figure3D
The figure that published the event.
"""
source = None
@property
def name(self):
"""The name of the event, which is the class name in snake case."""
return _camel_to_snake.sub("_", self.__class__.__name__).lower()
class FigureClosing(UIEvent):
"""Indicates that the user has requested to close a figure.
Attributes
----------
name : str
The name of the event: ``"figure_closing"``
source : matplotlib.figure.Figure | Figure3D
The figure that published the event.
"""
pass
@dataclass
class TimeChange(UIEvent):
"""Indicates that the user has selected a time.
Parameters
----------
time : float
The new time in seconds.
Attributes
----------
name : str
The name of the event: ``"time_change"``
source : matplotlib.figure.Figure | Figure3D
The figure that published the event.
time : float
The new time in seconds.
"""
time: float
def _get_event_channel(fig):
"""Get the event channel associated with a figure.
If the event channel doesn't exist yet, it gets created and added to the
global ``_event_channels`` dict.
Parameters
----------
fig : matplotlib.figure.Figure | Figure3D
The figure to get the event channel for.
Returns
-------
channel : dict[event -> list]
        The event channel. An event channel is a dict mapping string event
        names to a list of callbacks representing all subscribers to the
channel.
"""
import matplotlib
# Create the event channel if it doesn't exist yet
if fig not in _event_channels:
# The channel itself is a dict mapping string event names to a list of
# subscribers. No subscribers yet for this new event channel.
_event_channels[fig] = dict()
# When the figure is closed, its associated event channel should be
# deleted. This is a good time to set this up.
def delete_event_channel(event=None):
"""Delete the event channel (callback function)."""
publish(fig, event=FigureClosing()) # Notify subscribers of imminent close
unlink(fig) # Remove channel from the _event_channel_links dict
if fig in _event_channels:
del _event_channels[fig]
# Hook up the above callback function to the close event of the figure
# window. How this is done exactly depends on the various figure types
# MNE-Python has.
if isinstance(fig, matplotlib.figure.Figure):
fig.canvas.mpl_connect("close_event", delete_event_channel)
else:
        raise NotImplementedError("This figure type is not supported yet.")
# Now the event channel exists for sure.
return _event_channels[fig]
def publish(fig, event):
"""Publish an event to all subscribers of the figure's channel.
The figure's event channel and all linked event channels are searched for
subscribers to the given event. Each subscriber had provided a callback
function when subscribing, so we call that.
Parameters
----------
fig : matplotlib.figure.Figure | Figure3D
The figure that publishes the event.
event : UIEvent
Event to publish.
"""
if fig in _disabled_event_channels:
return
# Compile a list of all event channels that the event should be published
# on.
channels = [_get_event_channel(fig)]
links = _event_channel_links.get(fig, None)
if links is not None:
for linked_fig, event_names in links.items():
if event_names == "all" or event.name in event_names:
channels.append(_get_event_channel(linked_fig))
# Publish the event by calling the registered callback functions.
event.source = fig
for channel in channels:
if event.name not in channel:
channel[event.name] = set()
for callback in channel[event.name]:
callback(event=event)
def subscribe(fig, event_name, callback):
"""Subscribe to an event on a figure's event channel.
Parameters
----------
fig : matplotlib.figure.Figure | Figure3D
The figure of which event channel to subscribe.
event_name : str
The name of the event to listen for.
callback : callable
The function that should be called whenever the event is published.
"""
channel = _get_event_channel(fig)
if event_name not in channel:
channel[event_name] = set()
channel[event_name].add(callback)
def unsubscribe(fig, event_names, callback=None):
"""Unsubscribe from an event on a figure's event channel.
Parameters
----------
fig : matplotlib.figure.Figure | Figure3D
The figure of which event channel to unsubscribe from.
event_names : str | list of str
Select which events to stop subscribing to. Can be a single string
event name, a list of event names or ``"all"`` which will unsubscribe
from all events.
callback : callable | None
The callback function that should be unsubscribed, leaving all other
callback functions that may be subscribed untouched. By default
(``None``) all callback functions are unsubscribed from the event.
"""
channel = _get_event_channel(fig)
# Determine which events to unsubscribe for.
if event_names == "all":
if callback is None:
event_names = list(channel.keys())
else:
event_names = list(k for k, v in channel.items() if callback in v)
elif isinstance(event_names, str):
event_names = [event_names]
for event_name in event_names:
if event_name not in channel:
warn(
f'Cannot unsubscribe from event "{event_name}" as we have never '
"subscribed to it."
)
continue
if callback is None:
del channel[event_name]
else:
# Unsubscribe specific callback function.
subscribers = channel[event_name]
if callback in subscribers:
subscribers.remove(callback)
else:
warn(
f'Cannot unsubscribe {callback} from event "{event_name}" '
"as it was never subscribed to it."
)
if len(subscribers) == 0:
del channel[event_name] # keep things tidy
def link(fig1, fig2, event_names="all"):
"""Link the event channels of two figures together.
When event channels are linked, any events that are published on one
channel are simultaneously published on the other channel. Links are
bi-directional.
Parameters
----------
fig1 : matplotlib.figure.Figure | Figure3D
The first figure whose event channel will be linked to the second.
fig2 : matplotlib.figure.Figure | Figure3D
The second figure whose event channel will be linked to the first.
event_names : str | list of str
Select which events to publish across figures. By default (``"all"``),
both figures will receive all of each other's events. Passing a list of
event names will restrict the events being shared across the figures to
only the given ones.
"""
if event_names != "all":
event_names = set(event_names)
if fig1 not in _event_channel_links:
_event_channel_links[fig1] = WeakKeyDictionary()
_event_channel_links[fig1][fig2] = event_names
if fig2 not in _event_channel_links:
_event_channel_links[fig2] = WeakKeyDictionary()
_event_channel_links[fig2][fig1] = event_names
def unlink(fig):
"""Remove all links involving the event channel of the given figure.
Parameters
----------
fig : matplotlib.figure.Figure | Figure3D
The figure whose event channel should be unlinked from all other event
channels.
"""
linked_figs = _event_channel_links.get(fig)
if linked_figs is not None:
for linked_fig in linked_figs.keys():
del _event_channel_links[linked_fig][fig]
if len(_event_channel_links[linked_fig]) == 0:
del _event_channel_links[linked_fig]
if fig in _event_channel_links: # need to check again because of weak refs
del _event_channel_links[fig]
@contextlib.contextmanager
def disable_ui_events(fig):
"""Temporarily disable generation of UI events. Use as context manager.
Parameters
----------
fig : matplotlib.figure.Figure | Figure3D
The figure whose UI event generation should be temporarily disabled.
"""
_disabled_event_channels.add(fig)
try:
yield
finally:
_disabled_event_channels.remove(fig)
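# --------------------------------------------------------------------------
# Minimal usage sketch of the publish/subscribe API above (illustrative;
# assumes matplotlib with an interactive backend and that this module is
# importable as ``mne.viz.ui_events``):
#
#     import matplotlib.pyplot as plt
#     from mne.viz.ui_events import link, publish, subscribe, TimeChange
#
#     fig1, fig2 = plt.figure(), plt.figure()
#     link(fig1, fig2)                     # events now flow both ways
#     subscribe(fig2, "time_change", lambda event: print(event.time))
#     publish(fig1, TimeChange(time=1.5))  # the linked channel prints 1.5
# --------------------------------------------------------------------------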
| null |
mne/viz/ui_events.py
|
ui_events.py
|
py
| 10,164 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "weakref.WeakKeyDictionary",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "weakref.WeakKeyDictionary",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "weakref.WeakSet",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.figure",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "utils.warn",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "utils.warn",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "weakref.WeakKeyDictionary",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "weakref.WeakKeyDictionary",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "contextlib.contextmanager",
"line_number": 292,
"usage_type": "attribute"
}
] |
529358272
|
import pytest
@pytest.fixture
def creature():
from dread_snarfle.model import CreatureModel
return CreatureModel(100, 'Confused Wumpus')
@pytest.fixture
def item_properties():
return {
'names': [
'Slayer of Wumpuses',
'Scourge of the Wombat'
],
'repairs': [
{'description': 'Clean and polish', 'skill': 5},
{'description': 'Replace handle', 'skill': 3},
],
'qualities': {
'cfsfatwpt': 33,
'vsWombat': 1,
'vsWumpus': 2
}
}
@pytest.fixture
def item(item_properties):
from dread_snarfle.model import ItemModel
return ItemModel(
100,
'Tarnished Kitchen Knife',
item_properties['names'],
item_properties['repairs'],
item_properties['qualities']
)
@pytest.fixture
def player():
from dread_snarfle.model import PlayerModel
return PlayerModel(100, 'ABCDE123456', '1234567ABCDE', 'Testy McTesterson')
def test_creature_constructor(creature):
assert creature.intact == 100
assert creature.name == 'Confused Wumpus'
def test_item_constructor(item, item_properties):
assert item.intact == 100
assert item.name == 'Tarnished Kitchen Knife'
assert item.names == item_properties['names']
assert item.repairs == item_properties['repairs']
assert item.qualities == item_properties['qualities']
def test_player_constructor(player):
assert player.intact == 100
assert player.item == 'ABCDE123456'
assert player.model == '1234567ABCDE'
assert player.name == 'Testy McTesterson'
| null |
tests/model.py
|
model.py
|
py
| 1,624 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "dread_snarfle.model.CreatureModel",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixture",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "dread_snarfle.model.ItemModel",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "dread_snarfle.model.PlayerModel",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 41,
"usage_type": "attribute"
}
] |
465194728
|
#
# @lc app=leetcode id=200 lang=python3
#
# [200] Number of Islands
#
from typing import List
# @lc code=start
class Solution:
def numIslands(self, grid: List[List[str]]) -> int:
if(len(grid)==0 or len(grid[0])==0):
return 0
res = 0
for i in range(len(grid)):
for j in range(len(grid[0])):
if(grid[i][j] == '1'):
self.helper(grid, i, j)
res += 1
return res
def helper(self, grid, x, y):
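        # Depth-first flood fill: mark every reachable land cell ('1') as water ('0') so each island is counted once.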
if(x < 0 or x >= len(grid) or y < 0 or y >= len(grid[0])):
return
if(grid[x][y] == '0'):
return
grid[x][y] = '0'
self.helper(grid, x - 1, y)
self.helper(grid, x + 1, y)
self.helper(grid, x, y - 1)
self.helper(grid, x, y + 1)
# @lc code=end
print(Solution().numIslands([["1","1","0","0","0"],["1","1","0","0","0"],["0","0","1","0","0"],["0","0","0","1","1"]]))
| null |
heregreat/python/200.number-of-islands.py
|
200.number-of-islands.py
|
py
| 949 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "typing.List",
"line_number": 9,
"usage_type": "name"
}
] |
400188151
|
from django.conf.urls import include, url
from .views import ProductDetailView, ProductListView, VariationListView
urlpatterns = [
url(r'^$', ProductListView.as_view(), name="list"),
url(r'^(?P<pk>\d+)/$', ProductDetailView.as_view(), name="detail"),
url(r'^(?P<pk>\d+)/inventory/$', VariationListView.as_view(), name="variation_list"),
]
| null |
src/products/urls.py
|
urls.py
|
py
| 353 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "views.ProductListView.as_view",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "views.ProductListView",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "views.ProductDetailView.as_view",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "views.ProductDetailView",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "views.VariationListView.as_view",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "views.VariationListView",
"line_number": 9,
"usage_type": "name"
}
] |
144765274
|
#!/usr/bin/env python
# coding: utf-8
import os
import re
import sys
import pandas as pd
import numpy as np
import glob
import csv
import logging
import argparse
import gzip
parser = argparse.ArgumentParser(description='Add rsIDs to DIAMANTE files.', add_help = True)
parser.add_argument('diamante', type = str, help = 'DIAMANTE summary stats file (unzipped).')
parser.add_argument('out', type = str, help = 'File to create')
parser.add_argument('--vcf', type = str, default='/lab/data/reference/human/hg19/annot/dbsnp150_variants/All_20170710.vcf.gz', help = 'dbSNP VCF file (default: /lab/data/reference/human/hg19/annot/dbsnp150_variants/All_20170710.vcf.gz).')
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s: %(message)s')
# load in the diamante data. Determine the ref and alt alleles...
logging.info('Reading DIAMANTE GWAS file')
diamante = pd.read_csv(args.diamante, delimiter='\t')
# map chrom:pos:ref:alt -> rsid
chrom_pos = set([i for i in (diamante.Chr.map(str) + ':' + diamante.Pos.map(str))])
logging.info('Looking for information for {} variants'.format(len(chrom_pos)))
chrom_pos_ref_alt_to_rsid = dict()
logging.info('Scanning through VCF to find variants')
VCF_HEADER_RE = re.compile('^#')
line_count = 0
with gzip.open(args.vcf, 'rt') as f:
for line in f:
line_count += 1
if line_count % 5000000 == 0:
logging.info('Processed {} lines of the VCF'.format(line_count))
if VCF_HEADER_RE.match(line):
continue
line = line.rstrip().split()
chrom, pos, rsid, ref, alt = line[0:5]
if '{chrom}:{pos}'.format(**locals()) in chrom_pos:
for i in alt.split(','):
key = '{}:{}:{}:{}'.format(chrom, pos, ref, i)
chrom_pos_ref_alt_to_rsid[key] = rsid
logging.info('Stored information for {} variants'.format(len(chrom_pos_ref_alt_to_rsid)))
logging.info('Determining the rsIDs')
conversions = []
count = 0
converted_count = 0
for index, row in diamante.iterrows():
count += 1
if count % 1000000 == 0:
logging.info('Converted {} of {} SNPs so far'.format(converted_count, count))
option_1 = '{Chr}:{Pos}:{EA}:{NEA}'.format(**row)
option_2 = '{Chr}:{Pos}:{NEA}:{EA}'.format(**row)
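    # The effect/non-effect alleles (EA/NEA) may correspond to dbSNP's REF/ALT in either order, so try both keys.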
if option_1 not in chrom_pos_ref_alt_to_rsid and option_2 not in chrom_pos_ref_alt_to_rsid:
# logging.info('No conversion for SNP at {Chr}:{Pos}'.format(**row))
conversions.append('')
continue
conversion = chrom_pos_ref_alt_to_rsid[option_1] if option_1 in chrom_pos_ref_alt_to_rsid else chrom_pos_ref_alt_to_rsid[option_2]
conversions.append(conversion)
converted_count += 1
diamante['rsID'] = conversions
converted_count = sum([1 for i in conversions if i != ''])
logging.info('Successfully converted {} of {} variants'.format(converted_count, len(conversions)))
noconversion = diamante[diamante.rsID == '']
diamante = diamante[diamante.rsID != '']
diamante.to_csv(args.out, sep='\t', index=False)
noconversion.to_csv(args.out + '.dropped', sep='\t', index=False)
| null |
bin/prep-diamante-for-ldsc.py
|
prep-diamante-for-ldsc.py
|
py
| 3,106 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "gzip.open",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 72,
"usage_type": "call"
}
] |
538265975
|
from common import random_str, auth_check
def test_ingress_fields(client):
auth_check(client.schema, 'ingress', 'crud', {
'namespaceId': 'cr',
'projectId': 'cr',
'rules': 'cru',
'tls': 'cru',
'defaultBackend': 'cru',
'status': 'r',
})
auth_check(client.schema, 'ingressBackend', '', {
'serviceId': 'cru',
'targetPort': 'cru',
'workloadIds': 'cru',
})
auth_check(client.schema, 'ingressRule', '', {
'host': 'cru',
'paths': 'cru',
})
assert 'httpIngressPath' not in client.schema.types
def test_ingress(pc):
client = pc.client
ns = pc.cluster.client.create_namespace(name=random_str(),
projectId=pc.project.id)
# wl = client.create_workload(namespaceId=ns.id,
# scale=1,
# containers=[{
# 'name': 'one',
# 'image': 'nginx',
# }])
# name = random_str()
# client.create_ingress(name=name,
# namespaceId=ns.id,
# rules=[
# {
# 'paths': {
# '/': {
# 'targetPort': 80,
# 'workloadIds': [wl.id],
# }
# }
# },
# ])
# assert ingress.rules[0]['paths']['/'] == {
# 'targetPort': 80,
# 'workloadIds': [wl.id]
# }
client.delete(ns)
| null |
tests/core/test_ingress.py
|
test_ingress.py
|
py
| 1,771 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "common.auth_check",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "common.auth_check",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "common.auth_check",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "common.random_str",
"line_number": 31,
"usage_type": "call"
}
] |
192598937
|
from unittest.mock import MagicMock
import pytest
import prefect
from prefect.tasks.twitter import LoadTweetReplies
from prefect.utilities.configuration import set_temporary_config
class TestLoadTweetReplies:
def test_initialize_with_nothing_sets_defaults(self):
task = LoadTweetReplies()
assert task.user is None
assert task.tweet_id is None
def test_initialize_kwargs_are_processed(self):
task = LoadTweetReplies(checkpoint=True, name="test")
assert task.name == "test"
assert task.checkpoint is True
@pytest.mark.parametrize("attr", ["user", "tweet_id"])
def test_initializes_attr_from_kwargs(self, attr):
task = LoadTweetReplies(**{attr: "my-value"})
assert getattr(task, attr) == "my-value"
def test_creds_are_pulled_from_secret_at_runtime(self, monkeypatch):
task = LoadTweetReplies(credentials_secret="TWITTER_API_CREDENTIALS")
tweepy = MagicMock()
monkeypatch.setattr("prefect.tasks.twitter.twitter.tweepy", tweepy)
with prefect.context(
secrets=dict(
TWITTER_API_CREDENTIALS={
"api_key": "a",
"api_secret": "b",
"access_token": "c",
"access_token_secret": "d",
}
)
):
task.run(user="")
assert tweepy.OAuthHandler.call_args[0] == ("a", "b")
| null |
tests/tasks/twitter/test_twitter.py
|
test_twitter.py
|
py
| 1,439 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "prefect.tasks.twitter.LoadTweetReplies",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "prefect.tasks.twitter.LoadTweetReplies",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "prefect.tasks.twitter.LoadTweetReplies",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "prefect.tasks.twitter.LoadTweetReplies",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "unittest.mock.MagicMock",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "prefect.context",
"line_number": 32,
"usage_type": "call"
}
] |
558115356
|
# Create folders if they don't exist
import os, pygame, sys
from pygame.locals import *
if not os.path.isdir('./Cloud/Download'):
pathD = os.path.join("./Cloud/Download")
os.mkdir(pathD)
if not os.path.isdir('./Cloud/Upload'):
pathU = os.path.join("./Cloud/Upload")
os.mkdir(pathU)
# PyGame
# Upload
def Upload():
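    # Upload a file from the local Upload folder to the Mega account, replacing any same-named file; uses the module-level session `m` created after login below.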
    print('\nNote: make sure your file is in the "Upload" folder')
    fileName = './Upload/'+str(input('\nFile name : '))
files = m.find(fileName)
if(files):
m.delete(files[0])
file = m.upload(fileName)
# Download
def Download():
    print('\nNote: make sure this file exists in your account')
    fileName = str(input('\nFile name : '))
file = m.find(fileName)
m.download(file)
# Connection
def CheckInternet():
import requests, sys
url = 'http://mega.nz'
internet = False
while not internet:
timeout = 5
try:
request = requests.get(url, timeout=timeout)
            print('\nInternet connected')
internet = True
except (requests.ConnectionError, requests.Timeout) as exception:
            print('\nNo internet connection')
            print('\nTry again?')
            print('\n1 : Yes\n2 : No')
            choose = int(input('\nChoose : '))
if choose == 2:
internet = True
                sys.exit()
#
CheckInternet()
# Login
from mega import Mega
mega = Mega()
print('\nLogin session')
email = str(input('\nEmail : '))
password = str(input('\nPassword : '))
print('\nLogging in...')
m = mega.login(email, password)
program = True
while program:
    print('\nChoose an option : ')
    print('\n1 : Upload\n2 : Download (under maintenance)\n3 : Exit')
    choose = int(input('\nChoose : '))
if(choose==1):
Upload()
elif(choose==2):
program = True
elif(choose==3):
program = False
        print('\nProgram closed')
| null |
Cloud/fileSynch.py
|
fileSynch.py
|
py
| 2,086 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.isdir",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "requests.ConnectionError",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "requests.Timeout",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "mega.Mega",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "mega.login",
"line_number": 84,
"usage_type": "call"
}
] |
391292631
|
from mongo import *
import requests,json,time,random,re
from lxml import etree
class Coin():
def qwbzj(req,x,y):
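        # Return the substring of req between the first occurrence of marker x and the following occurrence of marker y (exclusive).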
a=req.find(x)
b=req.find(y,int(a+1))
return req[a+len(x):b]
def shijian(times):
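        # Convert the site's timestamp strings to Unix time: 'N分間' = N minutes ago, 'N時間' = N hours ago, otherwise parse absolute dates by length; fall back to the current time.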
if times.find('分間')!=-1:
sj=int(times[:times.find('分間')])
sj=int(time.time())-sj*60
return sj
elif times.find('時間')!=-1:
sj=int(times[:times.find('時間')])
sj=int(time.time()-sj*60*60)
return sj
elif len(times)==17:
print(times)
timeArray = time.strptime(times, "%Y-%m-%d %H:%M")
timeStamp = int(time.mktime(timeArray))
return timeStamp
elif len(times)==10 or len(times)==9:
print(times)
timeArray = time.strptime(times, "%Y/%m/%d")
timeStamp = int(time.mktime(timeArray))
return timeStamp
elif len(times)==16:
times=times+':00'
timeArray = time.strptime(times, "%Y-%m-%d %H:%M:%S")
timeStamp = int(time.mktime(timeArray))
return timeStamp
else:
return int(time.time())
def content(url,title,author,times,sort,img_url,brief,categories):
        print('Article: %s' % url)
req=requests.get(url,verify=False).text
content=Coin.qwbzj(req,'<div id="the-content" class="entry-content">','<!-- ページリンク -->').strip()
cc=re.findall(r'href=\'[^\s]*\'',content)
dd=cc+re.findall(r'href="[^\s]*"',content)
print(dd)
for link in dd:
content=content.replace(link,'')
ad=Coin.qwbzj(content,' <!-- 広告 -->','</script></div>')
jiaoyi=Coin.qwbzj(content,'<div id="text-35" class="widget-in-article widget_text">','''</table>''')
content=content.replace(ad,'').replace(jiaoyi,'').replace('<div','<p').replace('</div>','</p>')
froms='coinpost.jp'
country='JP'
read_number=0
try:
if select_id(url,country) ==None:
id = jp_insert_simple(title,author,int(times),int(time.time()),content,froms,sort,url,img_url,brief,country,categories,read_number)
jp_insert_simple1(id)
else:
return False
except Exception as e:
return False
def news():
categories='5af58f86839f3369e4d607e7'
sort_xpath='//*[@class="category"]/a/text()'
title_xpath='//*[@class="content-rwrap"]/a/h2/text()'
date_xpath='//*[@class="published"]/text()'
link_xpath='//*[@class="content-lwrap"]/a'
briefs_xpath='//*[@class="entry-snippet"]'
img_xpath='//*[@class="content-lwrap"]/a/img'
page=1
while True:
url='http://coinpost.jp/?paged=%d'%page
            print('Current listing page: %s' % url)
req=requests.get(url).text
req=etree.HTML(req)
img=req.xpath(img_xpath)
author='coinpost'
date=req.xpath(date_xpath)
title=req.xpath(title_xpath)
link=req.xpath(link_xpath)
sort=req.xpath(sort_xpath)
briefs=req.xpath(briefs_xpath)
if len(title)<2:
break
if title != None:
for num in range(len(title)):
try:
state=Coin.content(link[num].get('href'),title[num].strip(),author,Coin.shijian(date[num].strip()),sort[num],img[num].get('src'),briefs[num].text.strip(),categories)
if state == False:
break
except Exception as e:
print(e)
if state ==False:
break
else:
break
page+=1
Coin.news()
| null |
coin_news/jp_coinpost.py
|
jp_coinpost.py
|
py
| 3,836 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "time.time",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "time.strptime",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "time.mktime",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.strptime",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "time.mktime",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.strptime",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "time.mktime",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "lxml.etree.HTML",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 72,
"usage_type": "name"
}
] |
557232027
|
from utils import *
import networkx
import sys
import warnings
warnings.filterwarnings('ignore')
def get_summarization_by_textrank(text, max_len=200):return merge_sen_from_scores(text, text_rank, max_len)
def text_rank(text, window=5):
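    # Build a sentence co-occurrence graph over a sliding window and score sentences with PageRank.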
graph = get_sentence_graph(text, window)
scored_sen = networkx.pagerank(graph)
ranked_sen = sorted(scored_sen.items(), key=lambda x: x[-1], reverse=True)
return ranked_sen
def get_sentence_graph(text, window=5):
sen_graph = networkx.graph.Graph()
sub_sens = split_sentences(text)[::2]
for i, sen in enumerate(sub_sens):
for ii in range(i - window, i + window + 1):
if ii >= 0 and ii < len(sub_sens):
edge = (sen, sub_sens[ii])
sen_graph.add_edges_from([edge])
return sen_graph
with open(sys.argv[1], 'r',encoding='utf-8') as f:
t = f.readlines()[0]
summary = ''.join(get_summarization_by_textrank(t))
print(summary)
if summary is not None:
print('save in working dir?Y/N')
if input() in ('Y', 'y'):
with open(sys.argv[1][:-4]+'summ.txt','w') as f:
f.write(summary)
print('finished!')
| null |
mozhiwen/text_rank.py
|
text_rank.py
|
py
| 1,160 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "warnings.filterwarnings",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "networkx.pagerank",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "networkx.graph.Graph",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "networkx.graph",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 40,
"usage_type": "attribute"
}
] |
407798749
|
"""Limit vcf to coding regions pulled from gemini."""
import csv, argparse
def loadPos(chrom, codingFile):
pos = {}
with open(codingFile) as f:
reader = csv.DictReader(f, delimiter='\t')
for row in reader:
if row['chrom'] == chrom:
p = str( int(row['start']) +1)
pos[p] = True
return pos
def limit(pos, vcfIn, fout):
with open(vcfIn) as f:
for line in f:
if line[0] == '#':
print(line.strip(), file=fout)
else:
key = line.split()[1]
if key in pos:
print(line.strip(), file=fout)
def main(args):
pos = loadPos(args.chrom, args.codingFile)
with open(args.vcfOut, 'w') as fout:
limit(pos, args.vcfIn, fout)
if __name__ == "__main__":
desc = 'Limit vcf file to coding regions'
parser = argparse.ArgumentParser(description=desc)
argLs = ('chrom', 'codingFile', 'vcfIn', 'vcfOut')
for param in argLs:
parser.add_argument(param)
args = parser.parse_args()
main(args)
| null |
code/scripts/limitToCoding.py
|
limitToCoding.py
|
py
| 1,088 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "csv.DictReader",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 31,
"usage_type": "call"
}
] |
62191678
|
'''
TRAM-ID : SK
TR description : cash-market trading member (broker) data
INPUT FIELD
[Single data]
Field#  Item name              SIZE  Description
0       Short code (단축코드)   6
'''
# -*- coding: utf-8 -*-
import sys
import time
from PyQt5.QAxContainer import *
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtCore import *
from PyQt5.QAxContainer import *
from time import sleep
import requests
from pymongo import MongoClient
import datetime
from datetime import timedelta
from data.common import mongo_find
import json
from data.TR_1206 import TR_1206
class SK(QMainWindow):
def __init__(self, stock_code):
super().__init__()
        # Create the control object that handles Indi TR requests.
self.IndiTR = QAxWidget("GIEXPERTCONTROL.GiExpertControlCtrl.1")
# Indi API event
self.IndiTR.ReceiveData.connect(self.ReceiveData)
self.IndiTR.ReceiveSysMsg.connect(self.ReceiveSysMsg)
self.stock_code= stock_code
self.btn_Search()
    # The part that actually requests the data follows.
    def btn_Search(self):
        # Chart query: historical prices can be fetched through a chart query.
        # TR_SCHART : the TR used to request historical prices.
        # Fill in the TR's input fields according to its field format and send the request.
        # Request format
        ret = self.IndiTR.dynamicCall("SetQueryName(QString)", "SK")
        ret = self.IndiTR.dynamicCall("SetSingleData(int, QString)", 0, self.stock_code) # short code
        rqid = self.IndiTR.dynamicCall("RequestData()") # request the data
    # Callback that receives the data returned by the requested TR.
    def ReceiveData(self, rqid):
        # The request ID maps back to the name of the TR that was sent.
        # GetMultiRowCount() returns the number of multi rows in the TR result.
stock_data =['체결시간' , '국내총순매수대금', '외국계총순매수대금', '전체순매수대금', '단축코드']
        # data layout
DATA = {}
print(self.IndiTR.dynamicCall("GetSingleData(int)", 0) )
        # read the data
        DATA[stock_data[0]] = self.IndiTR.dynamicCall("GetSingleData(int)", 2) # execution time
        DATA[stock_data[1]] = self.IndiTR.dynamicCall("GetSingleData(int)", 42) # domestic total net buy value
        DATA[stock_data[2]] = self.IndiTR.dynamicCall("GetSingleData(int)", 48) # foreign total net buy value
        DATA[stock_data[3]] = self.IndiTR.dynamicCall("GetSingleData(int)", 54) # overall total net buy value
        DATA[stock_data[4]] = self.IndiTR.dynamicCall("GetSingleData(int)", 1) # short code
        rqid = self.IndiTR.dynamicCall("ClearReceiveBuffer()") # clear the receive buffer
print("SK")
print(DATA)
print("SK")
        print(DATA[stock_data[2]] != '' and DATA[stock_data[1]] != '')
        if DATA[stock_data[2]] != '' and DATA[stock_data[1]] != '':
if int(DATA[stock_data[2]])>0 and int(DATA[stock_data[1]])<0 :
TR_1206_vari = TR_1206(DATA[stock_data[4]], "20200113", "20200114", "1", "1")
time.sleep(1)
return
print("check1")
print("check2")
return
#client = MongoClient('127.0.0.1', 27017)
#db = client["stock_data"]
#collection = db["SK"]
#print(collection.insert(DATA))
#time.sleep(0.5)
#QCoreApplication.instance().quit()
    # Print any system message that is received.
def ReceiveSysMsg(self, MsgID):
print("System Message Received = ", MsgID)
if __name__ == "__main__":
client = MongoClient('127.0.0.1', 27017)
db = client["stock_data"]
collection = db["stock_mst"]
app = QApplication(sys.argv)
for i in collection.find():
SK_vari = SK(i["단축코드"])
time.sleep(0.5)
app.exec_()
| null |
data/mst_SK_TR_1206.py
|
mst_SK_TR_1206.py
|
py
| 3,933 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "data.TR_1206.TR_1206",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 105,
"usage_type": "call"
}
] |
137047765
|
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import urllib
import urllib2
import re
from bs4 import BeautifulSoup as bs
# get the number list of AC subjects
def getNumberList():
url = 'http://acm.tju.edu.cn/toj/user_3013216077.html'
request = urllib.urlopen(url);
html_data = request.read().decode('utf-8')
#print(html_data)
soup = bs(html_data,'html.parser')
numberlist = soup.find_all('script' ,type='text/javascript')
#print(numberlist[0].string)
pattern = re.compile(r'p\(....\)')
NumberList_temp = re.findall(pattern,numberlist[0].string)
NumberList = []
for i in NumberList_temp:
# print(i[2:6])
NumberList.append(i[2:6])
return NumberList
def getRunID(sum):
url = 'http://acm.tju.edu.cn/toj/status.php?accept=1&user=3013216077'
RunIdList = []
count = 0
while True:
request = urllib.urlopen(url)
html_data = request.read().decode('utf-8')
soup = bs(html_data,'html.parser')
numberlist = soup.find_all('tr',align='center',height='30')
for item in numberlist:
count = count + 1
if count == sum + 1:
break
runid_temp = {}
temp = item.find_all('td')
runid_temp['runid'] = temp[0].string
runid_temp['number'] = temp[2].find_all('a')[0].string
RunIdList.append(runid_temp)
if count == sum + 1:
break
url = 'http://acm.tju.edu.cn/toj/status.php?user=3013216077&accept=1&start=' + str(int(RunIdList[count - 1]['runid']) - 1)
return RunIdList
def main():
numberlist = getNumberList()
RunIdList = getRunID(len(numberlist))
print(len(RunIdList))
for i in range(len(numberlist)):
#for i in range(3):
url = 'http://acm.tju.edu.cn/toj/show_code.php'
user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
values = {
'user_id': '3013216077',
'sid':RunIdList[i]['runid'],
'passwd':'**********'} # your password
headers = {'User_agent' : user_agent}
data = urllib.urlencode(values)
request = urllib2.Request(url,data,headers)
response = urllib2.urlopen(request)
html_data = response.read().decode('utf-8')
soup = bs(html_data,'html.parser')
print(RunIdList[i]['number'])
sourcecode = soup.find_all('pre')[0].string
textname = './' + RunIdList[i]['number'] + '.cpp'
file = open(textname,'w+')
file.write(sourcecode)
file.close()
main()
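# Note: this script is Python 2 only -- reload(sys), sys.setdefaultencoding, urllib.urlopen
# and urllib2 do not exist on Python 3. A Python 3 sketch of the POST request would be:
#   from urllib.request import Request, urlopen
#   from urllib.parse import urlencode
#   data = urlencode(values).encode('utf-8')
#   response = urlopen(Request(url, data, headers))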
| null |
爬虫/爬取TOJ上AC题目源码/test.py
|
test.py
|
py
| 2,323 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.setdefaultencoding",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "urllib.urlopen",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "urllib.urlopen",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "urllib.urlencode",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "urllib2.Request",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 70,
"usage_type": "call"
}
] |
355219813
|
# -*- coding: utf-8 -*-
import pygame
class Ship():
def __init__(self, ai_settings, screen):
"""初始化飞船并设置其初始位置"""
#用传入的screen设置ship的screen
self.screen = screen
#获取飞船的设置类
self.ai_settings = ai_settings
"""加载飞船图像并获取其外接矩形"""
self.image = pygame.image.load('images/ship.bmp')
#将ship设置为传入的图像的矩形
self.rect = self.image.get_rect()
#将屏幕设置为传入屏幕的矩形
self.screen_rect = self.screen.get_rect()
"""将每艘新飞船放在屏幕的底部中央"""
#将ship矩形的中点x坐标设置为屏幕矩形的中点x坐标(设置水平距离)
self.rect.centerx = self.screen_rect.centerx
#将ship矩形的底设置为屏幕矩形的底
self.rect.bottom = self.screen_rect.bottom
#设置向右移动标志
self.moving_right = False
#设置向左移动标志
self.moving_left = False
#因为ship.rect.centerx为整型 无法直接存储float值 所以设置一个中间量来存储
self.center = float(self.rect.centerx)
    def blitme(self):
        """Draw the ship at its current location."""
        self.screen.blit(self.image, self.rect)
    def update(self):
        """Adjust the ship's position based on the movement flags."""
        # The ship's rect has a `right` attribute; compare it with the screen rect's
        # right edge to check whether the ship has reached the right edge of the screen.
        if self.moving_right and self.rect.right < self.screen_rect.right:
            #self.rect.centerx += 1
            self.center += self.ai_settings.ship_speed_factor
        if self.moving_left and self.rect.left > 0:
            #self.rect.centerx -= 1
            self.center -= self.ai_settings.ship_speed_factor
        # Update the rect object from self.center
        self.rect.centerx = self.center
    def center_ship(self):
        '''Center the ship on the screen.'''
        self.center = self.screen_rect.centerx
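# Minimal usage sketch (ai_settings is assumed to be a settings object exposing
# ship_speed_factor, and images/ship.bmp must exist; neither is defined in this module):
#   import pygame
#   pygame.init()
#   screen = pygame.display.set_mode((1200, 800))
#   ship = Ship(ai_settings, screen)
#   ship.moving_right = True
#   ship.update()
#   ship.blitme()
#   pygame.display.flip()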
| null |
ship.py
|
ship.py
|
py
| 1,865 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pygame.image.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 15,
"usage_type": "attribute"
}
] |
597560278
|
import django
from django import forms
from django.contrib.sites.models import Site
import os, datetime
from models import Image, Video, Audio, Flash, Document
class ContentCreationForm(forms.ModelForm):
"""
A form that creates a piece of content from a file and title.
"""
title = forms.CharField(max_length=255, widget=forms.TextInput(attrs={'size':85}))
external_url = forms.URLField(
required=False,
help_text="If this URLField is set, the media will be pulled externally")
file = forms.FileField(required=False)
creation_date = forms.DateTimeField()
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=forms.util.ErrorList, label_suffix=':',
empty_permitted=False, instance=None):
# set a default creation date and add the current site
if not instance and (initial is not None and not initial.has_key('creation_date')):
initial['creation_date'] = datetime.datetime.now()
if not instance and (initial is not None and not initial.has_key('site')):
initial['site'] = Site.objects.get_current().id
super(ContentCreationForm, self).__init__(data, files, auto_id, prefix, initial,
error_class, label_suffix,
empty_permitted, instance)
class ImageCreationForm(ContentCreationForm):
title = forms.CharField(max_length=255, widget=forms.TextInput(attrs={'size':85}), required=False)
slug = forms.CharField(required=False)
file = forms.FileField(required=False)
class Meta:
model = Image
def set_title_and_slug(self):
"""
If the title is empty, set it to the name of the uploaded or external file
"""
from django.template.defaultfilters import slugify
if not self.cleaned_data['title']:
if self.cleaned_data.has_key('file') and hasattr(self.cleaned_data['file'], 'name'):
filename = self.cleaned_data['file'].name
elif self.cleaned_data['external_url']:
filepath = self.cleaned_data['file'] or self.cleaned_data['external_url'].split('?')[0]
filename = os.path.basename(filepath)
else:
return
self.cleaned_data['title'] = filename
if not self.cleaned_data['slug']:
slug = slugify(self.cleaned_data['title'])
else:
slug = self.cleaned_data['slug']
try:
Image.objects.get(slug=slug)
slug = "%s_%d" % (slug, datetime.datetime.now().toordinal())
except Image.DoesNotExist:
pass
self.cleaned_data['slug'] = slug
def clean(self):
if (not self.cleaned_data.has_key('file') or not self.cleaned_data['file']) and not self.cleaned_data['external_url']:
raise forms.ValidationError("You must include either a file or external url")
self.set_title_and_slug()
return super(ImageCreationForm, self).clean()
class VideoCreationForm(ContentCreationForm, forms.ModelForm):
class Meta:
model = Video
class AudioCreationForm(ContentCreationForm, forms.ModelForm):
class Meta:
model = Audio
class FlashCreationForm(ContentCreationForm, forms.ModelForm):
class Meta:
model = Flash
class DocumentCreationForm(ContentCreationForm, forms.ModelForm):
class Meta:
model = Document
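# Note: this module targets Python 2 / early Django -- dict.has_key() was removed in
# Python 3 ("key in dict" instead), the implicit relative import "from models import ..."
# is Python 2 only, and forms.util.ErrorList has since moved to forms.utils.ErrorList
# in newer Django releases.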
| null |
eaa2011/setup/build/massmedia/massmedia/forms.py
|
forms.py
|
py
| 3,540 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.forms.ModelForm",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.forms.TextInput",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.forms.URLField",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.forms.FileField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.forms.DateTimeField",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.forms.util",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.sites.models.Site.objects.get_current",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.contrib.sites.models.Site.objects",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.sites.models.Site",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.forms.TextInput",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.forms.CharField",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "django.forms.FileField",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "models.Image",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "os.path.basename",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "django.template.defaultfilters.slugify",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "models.Image.objects.get",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "models.Image.objects",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "models.Image",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "models.Image.DoesNotExist",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "models.Image",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "django.forms.ValidationError",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "models.Video",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "models.Audio",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "models.Flash",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "models.Document",
"line_number": 89,
"usage_type": "name"
}
] |
538899661
|
#!/usr/bin/env python3
import argparse
import datetime
import os
import shutil
import hashlib
import commons
logger = commons.getlogger(name=__name__)
parser = argparse.ArgumentParser(description="sync cut media to ready upload folder")
parser.add_argument("--src", default="/data/yfm/draft/crhk/summit")
parser.add_argument("--dest", default="/data/yfm/publish/crhk/summit")
parser.add_argument("--year_since", default=datetime.date.today().year, help="import meta since this year", type=int)
def main(path_src, path_dest, year_since):
for root, _, files in os.walk(path_src):
for file_name in files:
if not file_name.endswith(".cuts.mp3"):
continue
d = commons.parse_date(s=file_name)
if d is None:
msg = "parse_date -%s- failed" % file_name
logger.debug(msg)
continue
if d.year < year_since:
msg = "year %d < year_since" % d.year
logger.debug(msg)
continue
diso = d.isoformat()[:10]
fullpath_src = os.path.join(root, file_name)
meta = commons.get_metadata(filepath=fullpath_src)
if meta["duration"] == -1 or meta["duration"] < commons.default_min_duration:
msg = "%d seconds (%d min), duration too short" % (meta["duration"], meta["duration"]/60)
logger.debug(msg)
continue
filepath_dst = os.path.join(path_dest, diso[:4], diso[5:7], diso+".mp3")
if os.path.exists(filepath_dst):
md5_dst = hashlib.md5(open(filepath_dst,'rb').read()).hexdigest()
md5_src = hashlib.md5(open(fullpath_src,'rb').read()).hexdigest()
if md5_dst == md5_src:
msg = "%s already exists, skip" % filepath_dst
logger.debug(msg)
continue
parent = os.path.dirname(filepath_dst)
if not os.path.exists(parent):
os.makedirs(parent)
shutil.copy(src=fullpath_src, dst=filepath_dst)
msg = "%s => %s" % (fullpath_src, filepath_dst)
logger.debug(msg)
logger.debug("done")
if __name__ == '__main__':
args = parser.parse_args()
if not args.src or not args.dest:
parser.print_usage()
exit(1)
main(
path_src=args.src,
path_dest=args.dest,
year_since=args.year_since,
)
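# Behaviour summary (derived from the code above):
#   * only *.cuts.mp3 files whose names contain a date that commons.parse_date can read,
#     from year_since onwards, and whose duration passes commons.default_min_duration are copied;
#   * each accepted file lands at <dest>/<YYYY>/<MM>/<YYYY-MM-DD>.mp3, with directories created as needed;
#   * an existing destination file is only skipped when its MD5 matches the source file.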
| null |
tools/sync_cut_media.py
|
sync_cut_media.py
|
py
| 2,554 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "commons.getlogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "commons.parse_date",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "commons.get_metadata",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "commons.default_min_duration",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "hashlib.md5",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "hashlib.md5",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "shutil.copy",
"line_number": 59,
"usage_type": "call"
}
] |
555337244
|
import numpy as np
from cvxopt import matrix
from cvxopt import solvers
from matplotlib import cm
import matplotlib.pyplot as plt
n = 100
d = 2
c = 5
#Get train data
x = []
y = []
f = open('2a_train_data.txt','r')
for line in f:
x1,x2,y_temp=line.split(' ')
x.append(np.asarray([x1,x2],dtype=float))
y.append(y_temp)
x = np.asarray(x).T
y = np.asarray(y,dtype=float)
P = np.zeros((n+d+1,n+d+1))
P[0:2,0:2] = 0.5*np.eye(2)
q = np.zeros(n+d+1)
q[d:n+d] = c
G = np.zeros((2*n,n+d+1))
for i in range(n):
G[i,n+d] = -y[i]
G[i,0] = -x[0,i]*y[i]
G[i,1] = -x[1,i]*y[i]
G[0:n,d:n+d] = -1*np.eye(n)
G[n:2*n,d:n+d] = -1*np.eye(n)
h = np.zeros(2*n)
h[0:n] = -1
P = matrix(P,tc='d')
q = matrix(q,tc='d')
G = matrix(G,tc='d')
h = matrix(h,tc='d')
sol = solvers.qp(P,q,G,h)
var = sol['x']
w = var[0:d]
zeta = var[d:n+d]
b = var[n+d]  # the bias is the last entry of the variable vector [w, zeta, b]
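# Variable layout for the QP above: z = [w (d entries), zeta (n slack entries), b (1 entry)].
# cvxopt minimizes 0.5*z'Pz + q'z subject to Gz <= h, so with the matrices built above this
# is the soft-margin SVM primal: the quadratic term penalizes w, q puts the cost c on the
# slacks, the first n rows of G/h encode y_i*(w.x_i + b) >= 1 - zeta_i, and the last n rows
# encode zeta_i >= 0.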
print(f'w : {w}')
print(f'b : {b}')
f = lambda x,y: w[0]*x+w[1]*y+b
fig = plt.figure(figsize=(12,6))
ax = fig.add_subplot(1,2,1,projection='3d')
x_value = np.linspace(-3,3,100)
y_value = np.linspace(-3,3,100)
x_grid,y_grid = np.meshgrid(x_value,y_value)
z_values = f(x_grid,y_grid)
surf = ax.plot_surface(x_grid, y_grid, z_values,rstride=5, cstride=5,linewidth=0, cmap=cm.plasma)
ax = fig.add_subplot(1,2,2)
plt.contourf(x_grid, y_grid, z_values, 30,cmap=cm.plasma)
fig.colorbar(surf, aspect=18)
plt.tight_layout()
# Results on Train data
true = 0
for i in range(n):
y_pred = np.sign(np.dot(w.T,x[:,i])+b)
if y_pred == y[i]:
true+=1
print(f'Accuracy on train set : {true/n}')
# Results on Test data
n_test = 50
x_test = []
y_test = []
f = open('2a_test_data.txt','r')
for line in f:
x1,x2,y_temp=line.split(' ')
x_test.append(np.asarray([x1,x2],dtype=float))
y_test.append(y_temp)
x_test = np.asarray(x_test).T
y_test = np.asarray(y_test,dtype=float)
true = 0
for i in range(n_test):
y_pred = np.sign(np.dot(w.T,x_test[:,i])+b)
if y_pred == y_test[i]:
true+=1
print(f'Accuracy on test set : {true/n_test}')
| null |
Assignment 1/Code/Q2/Problem2_b.py
|
Problem2_b.py
|
py
| 1,995 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.asarray",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cvxopt.matrix",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "cvxopt.matrix",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cvxopt.matrix",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cvxopt.matrix",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "cvxopt.solvers.qp",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "cvxopt.solvers",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.cm.plasma",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.cm",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.contourf",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "matplotlib.cm.plasma",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.cm",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "numpy.sign",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.sign",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 91,
"usage_type": "call"
}
] |
556349366
|
from __future__ import absolute_import, division, print_function
import sys
import string
import llvm.core as lc
from llvm import LLVMException
from .. import llvm_array as lla
from .blaze_kernels import BlazeElementKernel, refresh_name
from . import blaze_kernels
from blaze.py2help import _strtypes
def letters(source=string.ascii_lowercase):
k = 0
while 1:
for a in source:
yield a+str(k) if k else a
k = k+1
# An Argument to a kernel tree (encapsulates the array, argument kind and rank)
# FIXME --- perhaps we should have _llvmtype be a SCALAR kind or a string that gets converted
# to the correct llvmtype when needed.
class Argument(object):
_shape = None
def __init__(self, dshape, kind, rank, llvmtype):
self.dshape = dshape
if isinstance(kind, tuple):
kind = kind[0]
self.kind = kind
self.rank = rank
self._llvmtype = llvmtype
#def __eq__(self, other):
# if not isinstance(other, Argument):
# return NotImplemented
# # FIXME: Should remove kind check and cast different kinds
# # in the generated code.
# return ((self.dshape is other.dshape) and
# (self.rank == other.rank) and
# (self.kind==other.kind))
# FIXME:
# Because module linking destroys struct names we need to store
# a stringified version of the element type and then
# convert as needed...
@property
def llvmtype(self):
self._llvmtype = refresh_name(self._llvmtype)
return self._llvmtype
#@property
#def shape(self):
# if self._shape is None:
# if self.rank == 0:
# self._shape = ()
# else:
# self._shape = self.arr.dshape.shape[-self.rank:]
# return self._shape
def get_kernel_dshape(self):
"""Returns the kernel data-shape of the argument."""
rank = self.rank
total_dshape = self.dshape
sub = len(total_dshape)-1-rank
return total_dshape.subarray(sub)
# A KernelTree is just the bare element-wise kernel functions
# (no arguments). Any arguments are identified as unique-names
# in an abstract name-space
# All nodes in the kernel tree can also be named
# or else a unique-name
# from the abstract name-space will be created.
# Each KernelTree has a single llvm module name-space
class KernelTree(object):
_stream_of_unique_names = letters()
_stream_of_unique_kernels = letters()
_fused = None
_mark = False
_shape = None
def __init__(self, kernel, children=[], name=None):
assert isinstance(kernel, BlazeElementKernel)
for el in children:
assert isinstance(el, (KernelTree, Argument))
self.kernel = kernel
self.children = children
if name is None:
name = 'node_' + next(self._stream_of_unique_names)
self.name = name
def _reset_marks(self):
self._mark = False
if not self.leafnode:
for child in self.children:
if isinstance(child, KernelTree):
child._reset_marks()
def sorted_nodes(self):
"""Return depth-first list of unique KernelTree Nodes.
The root of the tree will be the last node.
"""
nodes = []
self._reset_marks()
self.visit(nodes)
return nodes
@property
def shape(self):
if self._shape is None:
shapeargs = [child.shape for child in self.children]
self._shape = self.kernel.shapefunc(*shapeargs)
return self._shape
@property
def leafnode(self):
return all(isinstance(child, Argument) for child in self.children)
def visit(self, nodes):
if not self.leafnode:
for child in self.children:
if isinstance(child, KernelTree):
child.visit(nodes)
if not self._mark:
nodes.append(self)
self._mark = True
def fuse(self):
if self._fused is not None:
return self._fused
# Even if self is a leaf node (self.leafnode is True), do
# this processing, so as to consistently combine repeated
# arguments.
krnlobj, children = fuse_kerneltree(self, self.kernel.module)
new = KernelTree(krnlobj, children)
self._update_kernelptrs(new)
return new
def _update_kernelptrs(self, eltree):
self._fused = eltree
kernel = eltree.kernel
def make_ckernel_deferred(self, out_dshape):
return self.fuse().kernel.make_ckernel_deferred(out_dshape)
def __str__(self):
pre = self.name + '('
post = ')'
strs = []
for child in self.children:
if isinstance(child, Argument):
strs.append('<arg>')
else:
strs.append(str(child))
body = ",".join(strs)
return pre + body + post
class _cleanup(object):
def __init__(self, builder, freefunc, freedata):
self.freefunc = freefunc
self.freedata = freedata
self.builder = builder
def _dealloc(self):
self.builder.call(self.freefunc, [self.freedata])
# This modifies the node to add a reference the output as llvm_obj
def insert_instructions(node, builder, output=None):
kernel = node.kernel
is_scalar = (kernel.kinds[-1] == lla.SCALAR)
#allocate space for output if necessary
new = None
if output is None:
if kernel.kinds[-1] == lla.POINTER:
output = builder.alloca(kernel.argtypes[-1].pointee)
elif not is_scalar: # Array
kind = kernel.kinds[-1][0]
eltype = kernel.argtypes[-1].pointee.elements[0].pointee
assert node.shape is not None
assert kernel.argtypes[-1].pointee.elements[1].count == len(node.shape)
output, freefunc, freedata = lla.create_array(
builder, node.shape, kind, eltype)
new = _cleanup(builder, freefunc, freedata)
#Setup the argument list
args = [child.llvm_obj for child in node.children]
if not is_scalar:
args.append(output)
# call the kernel corresponding to this node
# bitcast any arguments that don't match the kernel.function type
# for array types and pointer types... Needed because inputs might
# be from different compilers...
newargs = []
kfunc_args = kernel.func.type.pointee.args
for kind, oldarg, needed_type in zip(kernel.kinds, args, kfunc_args):
newarg = oldarg
if (kind != lla.SCALAR) and (needed_type != oldarg.type):
newarg = builder.bitcast(oldarg, needed_type)
newargs.append(newarg)
res = builder.call(kernel.func, newargs)
assert kernel.func.module is builder.basic_block.function.module
if is_scalar:
node.llvm_obj = res
else:
node.llvm_obj = output
return new
# This also replaces arguments with the unique argument in the kernel tree
def find_unique_args(tree, unique_args):
for i, element in enumerate(tree.children):
if isinstance(element, Argument):
try:
index = unique_args.index(element)
except ValueError: # not found
unique_args.append(element)
else:
tree.children[i] = unique_args[index]
else:
find_unique_args(element, unique_args)
def get_fused_type(tree):
"""Get the function type of the compound kernel
"""
outkrn = tree.kernel
# If this is not a SCALAR then we need to attach another node
out_kind = outkrn.kinds[-1]
out_type = outkrn.func.type.pointee.return_type
unique_args = []
find_unique_args(tree, unique_args)
args = [arg.llvmtype for arg in unique_args]
if out_kind != lla.SCALAR:
args.append(outkrn.argtypes[-1])
return unique_args, lc.Type.function(out_type, args)
def fuse_kerneltree(tree, module_or_name):
"""Fuse the kernel tree into a single kernel object with the common names
Examples:
add(multiply(b,c),subtract(d,f))
var tmp0 = multiply(b,c)
var tmp1 = subtract(d,f)
return add(tmp0, tmp1)
var tmp0;
var tmp1;
multiply(b,c,&tmp0)
subtract(d,f,&tmp1)
add(tmp0, tmp1, &res)
"""
if isinstance(module_or_name, _strtypes):
        module = lc.Module.new(module_or_name)
else:
module = module_or_name
args, func_type = get_fused_type(tree)
outdshape = tree.kernel.dshapes[-1]
try:
func = module.get_function_named(tree.name+"_fused")
except LLVMException:
func = lc.Function.new(module, func_type, tree.name+"_fused")
block = func.append_basic_block('entry')
builder = lc.Builder.new(block)
# TODO: Create wrapped function for functions
# that need to loop over their inputs
# Attach the llvm_object to the Argument objects
for i, arg in enumerate(args):
arg.llvm_obj = func.args[i]
# topologically sort the kernel-tree nodes and then for each node
# site we issue instructions to compute the value
nodelist = tree.sorted_nodes()
    cleanup = []  # Objects to deallocate any temporary heap memory needed;
    # must have a _dealloc method
def _temp_cleanup():
for obj in cleanup:
if obj is not None:
obj._dealloc()
#import pdb
#pdb.set_trace()
for node in nodelist[:-1]:
node.kernel.attach(module)
new = insert_instructions(node, builder)
cleanup.append(new)
nodelist[-1].kernel.attach(module)
if tree.kernel.kinds[-1] == lla.SCALAR:
new = insert_instructions(nodelist[-1], builder)
cleanup.append(new)
_temp_cleanup()
builder.ret(nodelist[-1].llvm_obj)
else:
new = insert_instructions(nodelist[-1], builder, func.args[-1])
cleanup.append(new)
_temp_cleanup()
builder.ret_void()
dshapes = [arg.get_kernel_dshape() for arg in args]
dshapes.append(outdshape)
newkernel = BlazeElementKernel(func, dshapes)
return newkernel, args
| null |
blaze/compute/bkernel/kernel_tree.py
|
kernel_tree.py
|
py
| 10,324 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "string.ascii_lowercase",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "blaze_kernels.refresh_name",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "blaze_kernels.BlazeElementKernel",
"line_number": 84,
"usage_type": "argument"
},
{
"api_name": "llvm.core.Type.function",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "llvm.core.Type",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "llvm.core",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "blaze.py2help._strtypes",
"line_number": 265,
"usage_type": "argument"
},
{
"api_name": "llvm.LLVMException",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "llvm.core.Function.new",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "llvm.core.Function",
"line_number": 276,
"usage_type": "attribute"
},
{
"api_name": "llvm.core",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "llvm.core.Builder.new",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "llvm.core.Builder",
"line_number": 278,
"usage_type": "attribute"
},
{
"api_name": "llvm.core",
"line_number": 278,
"usage_type": "name"
},
{
"api_name": "blaze_kernels.BlazeElementKernel",
"line_number": 322,
"usage_type": "call"
}
] |
649163086
|
import xml.etree.cElementTree as ET
import sys
import re
import csv
import collections
globalMsgListFile = "C:/Users/robert.ilitoi/Desktop/Warning View_subpriorities/Global_Msg_List.csv"
xmlFile1 = "d:/FordProject/Repository/Loop 7/FordS0_AMT_Configuration/Warnings/WarningDispatchers.xml"
translationFile2 = 'C:/Users/robert.ilitoi/Desktop/Warning View_subpriorities/WarningDispatchers2.xml'
warningMap = {}
warningMap2 = {}
globalMsgList = {}
data = []
tree = ET.ElementTree(file=xmlFile1)
container = tree.iterfind('.//WarningView')
def copy_tree(tree_root):
    return ET.ElementTree(tree_root)
def loadGlobalWarningList():
with open(globalMsgListFile, mode='r') as infile:
reader = csv.DictReader(infile)
key = ''
priority = 2
subprio = 1.0
for rows in reader:
key=rows['ID'].strip( ) #WARN_TYPE,TIME_OUT,LM,ICON,COLOR,CHIME_TYPE,TEXT
try:
priority = int(rows['PRIORITY'].strip( ) )
except Exception:
priority = 2
try:
subprio = float(rows['SUBPRIO'] )
except Exception:
subprio = 255
warningObj = (key,priority,subprio)
globalMsgList[key]=warningObj
loadGlobalWarningList()
root = tree.getroot()
for elem in container:
key = elem.attrib["WarningName"]
if(key in globalMsgList):
w = globalMsgList[key]
prio = w[1]
subprio = w[2]
data.append ( (key, prio, subprio, elem) )
#sortedMap = collections.OrderedDict( sorted( warningMap.items( ) ) )
# insert the last item from each tuple
#with open('TestDispatcher1.txt','w') as f:
# for index, wrnObject in sortedMap.items():
# f.write(wrnObject)
# f.write("\n")
data.sort( key=lambda row: (row[1], row[2]) )
myNewTree = ET.ElementTree(root)
myNewRoot = myNewTree.getroot()
myNewRoot.getchildren()[0].getchildren()[0].getchildren()[0].getchildren()[1].clear( )
for item in data:
#print(ET.tostring(item[3] ) )
myNewRoot.getchildren()[0].getchildren()[0].getchildren()[0].getchildren()[1].append(item[3])
myNewTree.write("NewWarningsDispatchers.xml")
print("SUCCESS, Warning Views have been ordered, compare the output with your original file!")
#container[:] = [item[-3] for item in data]
#tree.write("new-data.xml")
#tree2 = ET.ElementTree(file=translationFile2)
#for elem in tree2.iterfind('.//WarningView'):
# key = elem.attrib["WarningName"]
# warningMap2[key] = elem
# sortedMap = collections.OrderedDict( sorted( warningMap2.items( ) ) )
# with open('TestDispatcher2.txt','w') as f:
# for index, wrnObject in sortedMap.items():
# f.write(wrnObject)
# f.write("\n")
| null |
hmi_wrn_test_comparator.py
|
hmi_wrn_test_comparator.py
|
py
| 2,947 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "xml.etree.cElementTree.ElementTree",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "xml.etree.cElementTree",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "xml.etree.cElementTree.ElementTree",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "xml.etree.cElementTree",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "csv.DictReader",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "xml.etree.cElementTree.ElementTree",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "xml.etree.cElementTree",
"line_number": 71,
"usage_type": "name"
}
] |
395730149
|
#coding:utf-8
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from models import User, Act
from django import forms
# Create your views here.
class UserForm(forms.Form):
username1 = forms.CharField(label='username',max_length=15)
nickname1 = forms.CharField(label='nickname',max_length=15)
password1 = forms.CharField(label='password',widget=forms.PasswordInput())
age1 = forms.IntegerField(label='age')
sexChoices=((True,'Man'),(False,'Woman'))
sex = forms.ChoiceField(choices=sexChoices,label='sex')
class LoginForm(forms.Form):
username = forms.CharField(label='username',max_length=15)
password = forms.CharField(label='password',widget=forms.PasswordInput())
class CreateForm(forms.Form):
actname = forms.CharField(label='actname',max_length=15)
#actdate = forms.DateField(label='actdate')
actdate = forms.DateField(label='Date(mm/dd/yy)')
location= forms.CharField(label='location',max_length=100)
beforeChoices=((True,'Yes'),(False,'No'))
before = forms.ChoiceField(choices=beforeChoices,label='before')
budget = forms.FloatField(label='budget')
cost = forms.FloatField(label='cost')
def login(request):
username = request.COOKIES.get('username','')
if username:
return HttpResponseRedirect('/online/userinfo/')
if request.method =='POST':
lf = LoginForm(request.POST)
uf = UserForm(request.POST)
if lf.is_valid():
username = lf.cleaned_data['username']
password = lf.cleaned_data['password']
user = User.objects.filter(username__exact = username,password__exact = password)
if user:
response = HttpResponseRedirect('/online/userinfo/')
response.set_cookie('username',username,36000)
return response
else:
return HttpResponseRedirect('/online/login/')
if uf.is_valid():
username = uf.cleaned_data['username1']
nickname = uf.cleaned_data['nickname1']
password = uf.cleaned_data['password1']
age = uf.cleaned_data['age1']
sex = uf.cleaned_data['sex']
user = User.objects.filter(username=username)
if len(user)>0:
return HttpResponse('Regist failed --- User name has already been used.')
User.objects.create(username=username,nickname=nickname,password=password,age=age,sex=sex)
response = HttpResponseRedirect('/online/userinfo/')
response.set_cookie('username',username,36000)
return response
else:
lf = LoginForm()
uf = UserForm()
return render(request, 'login.html', {'lf':lf,'uf':uf})
'''def index(request):
username = request.COOKIES.get('username','')
return render_to_response('index.html',{'username':username})'''
def index(request):
return render(request, 'index.html')
def logout(request):
response = HttpResponseRedirect('/online/login/')
response.delete_cookie('username')
return response
def userinfo(request):
username = request.COOKIES.get('username','')
p = User.objects.get(username = username)
nickname = p.nickname
age = p.age
sex = p.sex
if sex:
sexot = 'Man'
else:
sexot = 'Woman'
acts = {}
for act in p.act_set.all():
acts[act.id] = act.actname
ud={'username':username,'nickname':nickname,'age':age,'sex':sexot}
nowuser = User.objects.get(username=username)
nowID = nowuser.id
if request.method == 'POST':
cf = CreateForm(request.POST)
if cf.is_valid():
actname = cf.cleaned_data['actname']
actdate = cf.cleaned_data['actdate']
location= cf.cleaned_data['location']
before = cf.cleaned_data['before']
budget = cf.cleaned_data['budget']
cost = cf.cleaned_data['cost']
actnow=Act.objects.create(actname=actname,actdate=actdate,location=location,before=before,budget=budget,cost=cost,owner=nowID)
actnow.partner.add(nowuser)
return HttpResponseRedirect('/online/userinfo/')
else:
cf = CreateForm()
return render(request, 'userinfo.html', locals())
# {'ud':ud,'username':username,'acts':acts,'nickname':nickname,'cf':cf}
def actinfo(request,actid):
username = request.COOKIES.get('username','')
nowuser = User.objects.get(username=username)
now = Act.objects.filter(id=actid)
if len(now)!=0:
actname = now[0].actname
actdate = now[0].actdate
location= now[0].location
        before = now[0].before
budget = now[0].budget
cost = now[0].cost
recive = now[0].recive
owner = now[0].owner
able = now[0].able
partner = {}
u = User.objects.get(id=owner)
alluser = User.objects.all()
nu = []
flag = nowuser in now[0].accept.all()
num1 = len(now[0].partner.all())
num2 = len(now[0].accept.all())
num3 = num1-num2
fflag = (num1*3 <= num2*4)
for user in now[0].partner.all():
if user.id == u.id: partner[user.id]=user.username+" (Owner)"
else: partner[user.id]=user.username
for user in alluser:
if not user in now[0].partner.all():
nu.append(user)
if request.method == 'POST':
# return HttpResponse('Create Action success.')
for key,value in request.POST.items():
if key == 'yes':
now[0].able = False
now[0].save()
return HttpResponseRedirect('/online/actinfo/'+actid)
if key == 'exit':
now[0].partner.remove(nowuser)
now[0].accept.remove(nowuser)
now[0].save()
return HttpResponseRedirect('/online/actinfo/'+actid)
if key == 'join':
now[0].partner.add(nowuser)
now[0].save()
return HttpResponseRedirect('/online/actinfo/'+actid)
if key == 'accept':
now[0].accept.add(nowuser)
now[0].save()
return HttpResponseRedirect('/online/actinfo/'+actid)
if key == 'refuse':
now[0].accept.remove(nowuser)
now[0].save()
return HttpResponseRedirect('/online/actinfo/'+actid)
s = User.objects.get(id=key)
if s in nu : now[0].partner.add(s)
else:
now[0].partner.remove(s)
now[0].accept.remove(s)
now[0].save()
return HttpResponseRedirect('/online/actinfo/'+actid)
nd=budget-recive
pnd=nd/len(partner)
return render(request, 'actinfo.html', locals())
'''
{'pnd':pnd,'actname':actname,'actdate':actdate,'able':able,'partner':partner,\
'u':u,'username':username,'nu':nu,'owner':owner,'budget':budget,'location':location,'recive':recive,'nd':nd,\
'cost':cost,'now':now,'nowuser':nowuser,'flag':flag,'fflag':fflag,'num1':num1,'num2':num2,'num3':num3}
'''
return HttpResponse('Wrong Action ID!')
def userinfo_new(request):
username = request.COOKIES.get('username','')
p = User.objects.get(username = username)
nickname = p.nickname
age = p.age
sex = p.sex
if sex:
sexot = 'Man'
else:
sexot = 'Woman'
acts = {}
for act in p.act_set.all():
acts[act.id] = act.actname
ud={'username':username,'nickname':nickname,'age':age,'sex':sexot}
nowuser = User.objects.get(username=username)
nowID = nowuser.id
if request.method == 'POST':
cf = CreateForm(request.POST)
if cf.is_valid():
actname = cf.cleaned_data['actname']
actdate = cf.cleaned_data['actdate']
location= cf.cleaned_data['location']
before = cf.cleaned_data['before']
budget = cf.cleaned_data['budget']
cost = cf.cleaned_data['cost']
actnow=Act.objects.create(actname=actname,actdate=actdate,location=location,before=before,budget=budget,cost=cost,owner=nowID)
actnow.partner.add(nowuser)
return HttpResponseRedirect('/online/userinfo_new/')
else:
cf = CreateForm()
acts_id=acts.keys()
acts_id.sort()
acts_list=[]
cost=0.0
for i in acts_id:
act_now=Act.objects.get(id=i)
owner_name=User.objects.get(id=act_now.owner).username
acts_list.append( (i,act_now.actname,owner_name,act_now.actdate,act_now.able) )
pnum=len(act_now.partner.all())
if not act_now.able:
cost=cost+act_now.cost/pnum
acts_num=len(acts_list)
# {'cost':cost,'ud':ud,'username':username,'acts':acts_list,'nickname':nickname,'cf':cf,'acts_id':acts_id,'acts_num':acts_num,}
return render(request, 'userinfo_new.html', locals())
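# Note: this views module assumes Python 2 and an old Django release -- the implicit
# relative import "from models import User, Act" would need to become
# "from .models import User, Act" on Python 3, and the hand-rolled cookie login here
# is what django.contrib.auth would normally provide.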
| null |
views.py
|
views.py
|
py
| 9,509 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.forms.Form",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.forms.PasswordInput",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.forms.IntegerField",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.forms.ChoiceField",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.forms.Form",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.forms.PasswordInput",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.forms.Form",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.forms.DateField",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.forms.ChoiceField",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.forms.FloatField",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.forms.FloatField",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "models.User.objects.filter",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "models.User.objects.filter",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "models.User.objects.create",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "models.User.objects.get",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "models.User.objects.get",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "models.Act.objects.create",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "models.Act.objects",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "models.Act",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "models.User.objects.get",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "models.Act.objects.filter",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "models.Act.objects",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "models.Act",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "models.User.objects.get",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "models.User.objects.all",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "models.User.objects.get",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "models.User.objects.get",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "models.User.objects.get",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 207,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "models.Act.objects.create",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "models.Act.objects",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "models.Act",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "models.Act.objects.get",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "models.Act.objects",
"line_number": 228,
"usage_type": "attribute"
},
{
"api_name": "models.Act",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "models.User.objects.get",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 236,
"usage_type": "call"
}
] |
435609284
|
# encoding=utf8
import os
import random
import socket
import time
import TickerConfig
import requests
from bs4 import BeautifulSoup
class proxy:
def __init__(self):
self.proxy_list = []
self.proxy_filter_list = []
ip = self.get_filter_proxy()
self.current = {
'http': 'http://{}'.format(ip[0]),
'https': 'http://{}'.format(ip[0]),
}
def get_proxyFromCloud(self):
resp = requests.get("{}/get/".format(TickerConfig.PROXY_HOST)).json()['proxy']
self.current = {
'http': 'http://{}'.format(resp),
'https': 'http://{}'.format(resp),
}
if(self.test_proxy()):
path = os.path.join(os.path.dirname(__file__), './proxy_list')
f = open(path, "w")
f.write(resp)
return self.current
else:
return self.get_proxyFromCloud()
    def delete_proxy(self):
        if self.current != {}:
            print('Deleting proxy ' + self.current['http'])
            requests.get("{}/delete/?proxy={}".format(TickerConfig.PROXY_HOST, self.current['http'].replace('http://','')))
            self.current = {}
def new_proxy(self):
self.delete_proxy()
return self.get_proxyFromCloud()
def test_proxy(self):
head = {
'Connection': 'keep-alive',
"Referer": "https://kyfw.12306.cn/otn/resources/login.html",
"Host": "kyfw.12306.cn",}
try:
requests.get("https://kyfw.12306.cn/otn/leftTicket/init", proxies=self.current, timeout=2, headers=head)
return True
except (requests.exceptions.Timeout, requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError):
# print ("代理链接超时,去除此IP:{0}".format(self.current['http']))
self.delete_proxy()
return False
    def get_proxy(self):
        """
        Fetch the raw (unfiltered) proxy list.
        :return:
        """
User_Agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0'
header = dict()
header['User-Agent'] = User_Agent
for i in range(1, 5):
time.sleep(1)
url = 'http://www.xicidaili.com/nn/' + str(i)
res = requests.get(url=url, headers=header).content
soup = BeautifulSoup(res, "html.parser")
ips = soup.findAll('tr')
for x in range(1, len(ips)):
ip = ips[x]
tds = ip.findAll("td")
ip_temp = tds[1].contents[0] + ":" + tds[2].contents[0]
print(ip_temp)
self.proxy_list.append(ip_temp)
    def filter_proxy(self):
        """
        Remove unusable IPs from the proxy list.
        :return:
        """
socket.setdefaulttimeout(1)
path = os.path.join(os.path.dirname(__file__), './proxy_list')
f = open(path, "w")
head = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36',
'Connection': 'keep-alive'}
url = "http://icanhazip.com"
proxy_num = 0
for proxy in self.proxy_list:
proxy_temp = {"https": "https://{}".format(proxy)}
try:
req = requests.get(url, proxies=proxy_temp, timeout=2, headers=head).content
print(req)
write_proxy = proxy + "\n"
f.write(write_proxy)
proxy_num += 1
except Exception:
print ("代理链接超时,去除此IP:{0}".format(proxy))
continue
print("总共可使用ip量为{}个".format(proxy_num))
    def get_filter_proxy(self):
        """
        Read the file of usable IPs.
        :return: list of usable IPs
        """
path = os.path.join(os.path.dirname(__file__), './proxy_list')
try:
with open(path, "r", encoding="utf-8") as f:
lins = f.readlines()
for i in lins:
p = i.strip("\n")
self.proxy_filter_list.append(p)
except Exception:
with open(path, "r", ) as f:
lins = f.readlines()
for i in lins:
p = i.strip("\n")
self.proxy_filter_list.append(p)
return self.proxy_filter_list
def main(self):
# self.get_proxy()
self.filter_proxy()
    def setProxy(self):
        """
        Make sure the proxy IPs are actually usable before enabling this feature.
        When querying, set the proxy as "ip:port"; a recommended proxy pool is
        https://github.com/jhao104/proxy_pool
        :return:
        """
ip = self.get_filter_proxy()
setIp = ip[random.randint(0, len(ip) - 1)]
proxie = {
'http': 'http://{}'.format(setIp),
'https': 'http://{}'.format(setIp),
}
return proxie
if __name__ == "__main__":
a = proxy()
print(a.get_filter_proxy())
def test():
a = proxy()
if(a.test_proxy() is False):
print(a.new_proxy())
else:
print(a.current)
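# Notes (derived from the code above):
#   * get_proxyFromCloud()/delete_proxy() expect a proxy-pool style HTTP service at
#     TickerConfig.PROXY_HOST that exposes /get/ and /delete/?proxy=... endpoints;
#   * filter_proxy() and get_filter_proxy() share a plain-text ./proxy_list file next
#     to this module, one "ip:port" entry per line.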
| null |
agency/agency_tools.py
|
agency_tools.py
|
py
| 5,186 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "TickerConfig.PROXY_HOST",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "TickerConfig.PROXY_HOST",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "socket.setdefaulttimeout",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 139,
"usage_type": "call"
}
] |
64616738
|
import sys
from argparse import ArgumentParser
from Clanwars import Clanwars
from StatsRoyaleParser import StatsRoyaleParser
parser = ArgumentParser()
parser.add_argument("-r", "--refresh", dest="refresh", default=False, action="store_true", help="call refresh clan at statsroyale first")
parser.add_argument("-c", "--clanid", dest="clanid", type=str, required=True)
group = parser.add_mutually_exclusive_group()
group.add_argument("-o", "--output", dest="outfile", type=str, help="csv output file")
group.add_argument("-u", "--userid", dest="userid", type=str, help="get user clanwar data by id")
group.add_argument("-n", "--username", dest="username", type=str, help="get user clanwar data by name")
args = parser.parse_args()
statsRoyaleParser = StatsRoyaleParser(args.clanid)
if args.refresh:
statsRoyaleParser.refreshClan()
# parse live (collection day or war day) clanwar
statsRoyaleParser.parseClanwar()
# parse history clanwars (max. last 10)
statsRoyaleParser.parseClanwarsHistory()
# get clanwars object
clanwars = statsRoyaleParser.getClanwars()
if args.outfile:
clanwars.createCSVFile(args.outfile)
elif args.userid:
clanwars.printPlayerById(args.userid)
elif args.username:
clanwars.printPlayerByName(args.username)
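# Example invocations (hypothetical placeholders, shown only to illustrate the
# CLI defined above):
#   python main.py -c <clanid> -r -o clanwars.csv
#   python main.py -c <clanid> -n <playername>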
| null |
main.py
|
main.py
|
py
| 1,255 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "StatsRoyaleParser.StatsRoyaleParser",
"line_number": 17,
"usage_type": "call"
}
] |
152237431
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.0.0
# kernelspec:
# display_name: Python [conda env:ncbi_remap]
# language: python
# name: conda-env-ncbi_remap-py
# ---
# %% [markdown]
# # Plots for BSC
# %%
import os
import sys
from pathlib import Path
from IPython.display import display, HTML, Markdown
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# Project level imports
from ncbi_remap.notebook import Nb
# %%
# Setup notebook
nbconfig = Nb.setup_notebook(ref_dir='../references/lcdb-references')
# %%
# Connect to data store
store = pd.HDFStore('../output/sra.h5', mode='r')
# %%
from pymongo import MongoClient
try:
with open('../output/.mongodb_host', 'r') as fh:
host = fh.read().strip()
except FileNotFoundError:
host = 'localhost'
mongoClient = MongoClient(host=host, port=27017)
db = mongoClient['sramongo']
ncbi = db['ncbi']
# %%
sns.set_context('poster')
plt.rcParams.update(
{
'axes.titleweight': 'bold',
'axes.labelweight': 'bold'
}
)
# %% [markdown]
# # There is a lot of data.
# %%
df = (
pd.DataFrame(list(
ncbi.aggregate([
{'$unwind': {'path': '$runs'}},
{
'$project': {
'_id': False,
'srx': '$srx',
'srr': '$runs.srr',
'date': '$runs.load_date',
'size_MB': '$runs.size_MB'
}
},
])
))
.set_index(['srx', 'srr'])
.assign(date=lambda df: pd.to_datetime(df.date))
.sort_values('date')
.assign(cum_sum_TB = lambda df: df.size_MB.cumsum() / 1e6)
.assign(year=lambda df: df.date.dt.year)
.dropna()
.assign(year=lambda df: df.year.astype(int))
)
# %%
fig, ax = plt.subplots()
df.plot('date', 'cum_sum_TB', ax=ax, legend=False)
ax.fill_between(df.date.dt.to_pydatetime(), 0, df['cum_sum_TB'])
fig.autofmt_xdate(rotation=0, ha='center')
ax.margins(0)
ax.set(xlabel = 'Date', ylabel = 'Cumulative Data (TB)')
ax.set_title('Accumulated Data in the SRA\n(D. melanogaster)', family='serif')
fig.savefig('/home/fearjm/Documents/data_in_sra.svg', bbox_inches='tight')
# %% [markdown]
# # I can use the data to figure out library strategy
# %%
strategy = (
pd.read_parquet('../output/metadata-wf/select_library_strategy.parquet')
.rename(columns={'Fear_et_al_library_strategy': 'library_strategy'})
.library_strategy
.pipe(lambda x: x[~x.str.contains('\|')])
#.pipe(lambda x: x[x.isin(['RNA-Seq', 'WGS'])])
)
# %%
feature_names = (
pd.read_csv('../output/metadata-wf/random_forest_library_strategy_feature_importance.tsv', sep='\t', header=None, names=['feature', 'importance'])
.sort_values('importance', ascending=False)
.head(20)
.feature
.values
.tolist()
)
# %%
df = (
pd.read_parquet('../output/metadata-wf/build_library_strategy_feature_set.parquet', columns=feature_names)
.join(strategy, how='inner')
)
# %%
g = (
sns.PairGrid(df.sample(10_000), hue='library_strategy', palette='tab20')
.map(plt.scatter, s=50, alpha=.5, rasterized=True)
.add_legend()
)
plt.subplots_adjust(hspace=.1, wspace=.1)
for ax in g.axes[:, 0]:
ax.set_ylabel(ax.get_ylabel(), fontsize=12)
for ax in g.axes[-1, :]:
ax.set_xlabel(ax.get_xlabel(), fontsize=12)
g.savefig('../output/notebook/2019-02-21_lib_strategy_features.svg', bbox_inches='tight')
| null |
notebook/2019-02-21_plots.py
|
2019-02-21_plots.py
|
py
| 3,700 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "ncbi_remap.notebook.Nb.setup_notebook",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "ncbi_remap.notebook.Nb",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "pandas.HDFStore",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "seaborn.set_context",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams.update",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "pandas.read_parquet",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "pandas.read_parquet",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "seaborn.PairGrid",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots_adjust",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 139,
"usage_type": "name"
}
] |
313727227
|
import os
from datetime import timedelta
DEBUG = True
SQLALCHEMY_DATABASE_URI = "sqlite:///data.db"
SQLALCHEMY_TRACK_MODIFICATIONS = False
PROPAGATE_EXCEPTIONS = True
JWT_SECRET_KEY = os.environ["APP_SECRET"]
SECRET_KEY = os.environ["APP_SECRET"]
JWT_ACCESS_TOKEN_EXPIRES = timedelta(
minutes=int(os.environ["ACCESS_TOKEN_EXPIRY_IN_MINS"])
)
JWT_REFRESH_TOKEN_EXPIRES = timedelta(
minutes=int(os.environ["ACCESS_TOKEN_EXPIRY_IN_MINS"])
)
UPLOADED_IMAGES_DEST = os.path.join("static", "images")
MAX_CONTENT_LENGTH = int(os.environ["MAX_CONTENT_LENGTH"])
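# A minimal sketch (assumed usage, not part of this repository) of how a Flask
# application could consume this module-style config:
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object("default_config")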
| null |
default_config.py
|
default_config.py
|
py
| 562 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 17,
"usage_type": "attribute"
}
] |
484347325
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
strip_jsonp,
unified_strdate,
US_RATINGS,
)
class PBSIE(InfoExtractor):
_STATIONS = (
('video.pbs.org', 'PBS: Public Broadcasting Service'), # http://www.pbs.org/
('video.aptv.org', 'APT - Alabama Public Television (WBIQ)'), # http://aptv.org/
('video.gpb.org', 'GPB/Georgia Public Broadcasting (WGTV)'), # http://www.gpb.org/
('video.mpbonline.org', 'Mississippi Public Broadcasting (WMPN)'), # http://www.mpbonline.org
('video.wnpt.org', 'Nashville Public Television (WNPT)'), # http://www.wnpt.org
('video.wfsu.org', 'WFSU-TV (WFSU)'), # http://wfsu.org/
('video.wsre.org', 'WSRE (WSRE)'), # http://www.wsre.org
('video.wtcitv.org', 'WTCI (WTCI)'), # http://www.wtcitv.org
('video.pba.org', 'WPBA/Channel 30 (WPBA)'), # http://pba.org/
('video.alaskapublic.org', 'Alaska Public Media (KAKM)'), # http://alaskapublic.org/kakm
# ('kuac.org', 'KUAC (KUAC)'), # http://kuac.org/kuac-tv/
# ('ktoo.org', '360 North (KTOO)'), # http://www.ktoo.org/
# ('azpm.org', 'KUAT 6 (KUAT)'), # http://www.azpm.org/
('video.azpbs.org', 'Arizona PBS (KAET)'), # http://www.azpbs.org
('portal.knme.org', 'KNME-TV/Channel 5 (KNME)'), # http://www.newmexicopbs.org/
('video.vegaspbs.org', 'Vegas PBS (KLVX)'), # http://vegaspbs.org/
('watch.aetn.org', 'AETN/ARKANSAS ETV NETWORK (KETS)'), # http://www.aetn.org/
('video.ket.org', 'KET (WKLE)'), # http://www.ket.org/
('video.wkno.org', 'WKNO/Channel 10 (WKNO)'), # http://www.wkno.org/
('video.lpb.org', 'LPB/LOUISIANA PUBLIC BROADCASTING (WLPB)'), # http://www.lpb.org/
('videos.oeta.tv', 'OETA (KETA)'), # http://www.oeta.tv
('video.optv.org', 'Ozarks Public Television (KOZK)'), # http://www.optv.org/
('watch.wsiu.org', 'WSIU Public Broadcasting (WSIU)'), # http://www.wsiu.org/
('video.keet.org', 'KEET TV (KEET)'), # http://www.keet.org
('pbs.kixe.org', 'KIXE/Channel 9 (KIXE)'), # http://kixe.org/
('video.kpbs.org', 'KPBS San Diego (KPBS)'), # http://www.kpbs.org/
('video.kqed.org', 'KQED (KQED)'), # http://www.kqed.org
('vids.kvie.org', 'KVIE Public Television (KVIE)'), # http://www.kvie.org
('video.pbssocal.org', 'PBS SoCal/KOCE (KOCE)'), # http://www.pbssocal.org/
('video.valleypbs.org', 'ValleyPBS (KVPT)'), # http://www.valleypbs.org/
('video.cptv.org', 'CONNECTICUT PUBLIC TELEVISION (WEDH)'), # http://cptv.org
('watch.knpb.org', 'KNPB Channel 5 (KNPB)'), # http://www.knpb.org/
('video.soptv.org', 'SOPTV (KSYS)'), # http://www.soptv.org
# ('klcs.org', 'KLCS/Channel 58 (KLCS)'), # http://www.klcs.org
# ('krcb.org', 'KRCB Television & Radio (KRCB)'), # http://www.krcb.org
# ('kvcr.org', 'KVCR TV/DT/FM :: Vision for the Future (KVCR)'), # http://kvcr.org
('video.rmpbs.org', 'Rocky Mountain PBS (KRMA)'), # http://www.rmpbs.org
('video.kenw.org', 'KENW-TV3 (KENW)'), # http://www.kenw.org
('video.kued.org', 'KUED Channel 7 (KUED)'), # http://www.kued.org
('video.wyomingpbs.org', 'Wyoming PBS (KCWC)'), # http://www.wyomingpbs.org
('video.cpt12.org', 'Colorado Public Television / KBDI 12 (KBDI)'), # http://www.cpt12.org/
('video.kbyueleven.org', 'KBYU-TV (KBYU)'), # http://www.kbyutv.org/
('video.thirteen.org', 'Thirteen/WNET New York (WNET)'), # http://www.thirteen.org
('video.wgbh.org', 'WGBH/Channel 2 (WGBH)'), # http://wgbh.org
('video.wgby.org', 'WGBY (WGBY)'), # http://www.wgby.org
('watch.njtvonline.org', 'NJTV Public Media NJ (WNJT)'), # http://www.njtvonline.org/
# ('ripbs.org', 'Rhode Island PBS (WSBE)'), # http://www.ripbs.org/home/
('watch.wliw.org', 'WLIW21 (WLIW)'), # http://www.wliw.org/
('video.mpt.tv', 'mpt/Maryland Public Television (WMPB)'), # http://www.mpt.org
('watch.weta.org', 'WETA Television and Radio (WETA)'), # http://www.weta.org
('video.whyy.org', 'WHYY (WHYY)'), # http://www.whyy.org
('video.wlvt.org', 'PBS 39 (WLVT)'), # http://www.wlvt.org/
('video.wvpt.net', 'WVPT - Your Source for PBS and More! (WVPT)'), # http://www.wvpt.net
('video.whut.org', 'Howard University Television (WHUT)'), # http://www.whut.org
('video.wedu.org', 'WEDU PBS (WEDU)'), # http://www.wedu.org
('video.wgcu.org', 'WGCU Public Media (WGCU)'), # http://www.wgcu.org/
# ('wjct.org', 'WJCT Public Broadcasting (WJCT)'), # http://www.wjct.org
('video.wpbt2.org', 'WPBT2 (WPBT)'), # http://www.wpbt2.org
('video.wucftv.org', 'WUCF TV (WUCF)'), # http://wucftv.org
('video.wuft.org', 'WUFT/Channel 5 (WUFT)'), # http://www.wuft.org
('watch.wxel.org', 'WXEL/Channel 42 (WXEL)'), # http://www.wxel.org/home/
('video.wlrn.org', 'WLRN/Channel 17 (WLRN)'), # http://www.wlrn.org/
('video.wusf.usf.edu', 'WUSF Public Broadcasting (WUSF)'), # http://wusf.org/
('video.scetv.org', 'ETV (WRLK)'), # http://www.scetv.org
('video.unctv.org', 'UNC-TV (WUNC)'), # http://www.unctv.org/
# ('pbsguam.org', 'PBS Guam (KGTF)'), # http://www.pbsguam.org/
('video.pbshawaii.org', 'PBS Hawaii - Oceanic Cable Channel 10 (KHET)'), # http://www.pbshawaii.org/
('video.idahoptv.org', 'Idaho Public Television (KAID)'), # http://idahoptv.org
('video.ksps.org', 'KSPS (KSPS)'), # http://www.ksps.org/home/
('watch.opb.org', 'OPB (KOPB)'), # http://www.opb.org
('watch.nwptv.org', 'KWSU/Channel 10 & KTNW/Channel 31 (KWSU)'), # http://www.kwsu.org
('video.will.illinois.edu', 'WILL-TV (WILL)'), # http://will.illinois.edu/
('video.networkknowledge.tv', 'Network Knowledge - WSEC/Springfield (WSEC)'), # http://www.wsec.tv
('video.wttw.com', 'WTTW11 (WTTW)'), # http://www.wttw.com/
# ('wtvp.org', 'WTVP & WTVP.org, Public Media for Central Illinois (WTVP)'), # http://www.wtvp.org/
('video.iptv.org', 'Iowa Public Television/IPTV (KDIN)'), # http://www.iptv.org/
('video.ninenet.org', 'Nine Network (KETC)'), # http://www.ninenet.org
('video.wfwa.org', 'PBS39 Fort Wayne (WFWA)'), # http://wfwa.org/
('video.wfyi.org', 'WFYI Indianapolis (WFYI)'), # http://www.wfyi.org
('video.mptv.org', 'Milwaukee Public Television (WMVS)'), # http://www.mptv.org
('video.wnin.org', 'WNIN (WNIN)'), # http://www.wnin.org/
('video.wnit.org', 'WNIT Public Television (WNIT)'), # http://www.wnit.org/
('video.wpt.org', 'WPT (WPNE)'), # http://www.wpt.org/
('video.wvut.org', 'WVUT/Channel 22 (WVUT)'), # http://wvut.org/
('video.weiu.net', 'WEIU/Channel 51 (WEIU)'), # http://www.weiu.net
('video.wqpt.org', 'WQPT-TV (WQPT)'), # http://www.wqpt.org
('video.wycc.org', 'WYCC PBS Chicago (WYCC)'), # http://www.wycc.org
# ('lakeshorepublicmedia.org', 'Lakeshore Public Television (WYIN)'), # http://lakeshorepublicmedia.org/
('video.wipb.org', 'WIPB-TV (WIPB)'), # http://wipb.org
('video.indianapublicmedia.org', 'WTIU (WTIU)'), # http://indianapublicmedia.org/tv/
('watch.cetconnect.org', 'CET (WCET)'), # http://www.cetconnect.org
('video.thinktv.org', 'ThinkTVNetwork (WPTD)'), # http://www.thinktv.org
('video.wbgu.org', 'WBGU-TV (WBGU)'), # http://wbgu.org
('video.wgvu.org', 'WGVU TV (WGVU)'), # http://www.wgvu.org/
('video.netnebraska.org', 'NET1 (KUON)'), # http://netnebraska.org
('video.pioneer.org', 'Pioneer Public Television (KWCM)'), # http://www.pioneer.org
('watch.sdpb.org', 'SDPB Television (KUSD)'), # http://www.sdpb.org
('video.tpt.org', 'TPT (KTCA)'), # http://www.tpt.org
('watch.ksmq.org', 'KSMQ (KSMQ)'), # http://www.ksmq.org/
('watch.kpts.org', 'KPTS/Channel 8 (KPTS)'), # http://www.kpts.org/
('watch.ktwu.org', 'KTWU/Channel 11 (KTWU)'), # http://ktwu.org
# ('shptv.org', 'Smoky Hills Public Television (KOOD)'), # http://www.shptv.org
# ('kcpt.org', 'KCPT Kansas City Public Television (KCPT)'), # http://kcpt.org/
# ('blueridgepbs.org', 'Blue Ridge PBS (WBRA)'), # http://www.blueridgepbs.org/
('watch.easttennesseepbs.org', 'East Tennessee PBS (WSJK)'), # http://easttennesseepbs.org
('video.wcte.tv', 'WCTE-TV (WCTE)'), # http://www.wcte.org
('video.wljt.org', 'WLJT, Channel 11 (WLJT)'), # http://wljt.org/
('video.wosu.org', 'WOSU TV (WOSU)'), # http://wosu.org/
('video.woub.org', 'WOUB/WOUC (WOUB)'), # http://woub.org/tv/index.php?section=5
('video.wvpublic.org', 'WVPB (WVPB)'), # http://wvpublic.org/
('video.wkyupbs.org', 'WKYU-PBS (WKYU)'), # http://www.wkyupbs.org
# ('wyes.org', 'WYES-TV/New Orleans (WYES)'), # http://www.wyes.org
('video.kera.org', 'KERA 13 (KERA)'), # http://www.kera.org/
('video.mpbn.net', 'MPBN (WCBB)'), # http://www.mpbn.net/
('video.mountainlake.org', 'Mountain Lake PBS (WCFE)'), # http://www.mountainlake.org/
('video.nhptv.org', 'NHPTV (WENH)'), # http://nhptv.org/
('video.vpt.org', 'Vermont PBS (WETK)'), # http://www.vpt.org
('video.witf.org', 'witf (WITF)'), # http://www.witf.org
('watch.wqed.org', 'WQED Multimedia (WQED)'), # http://www.wqed.org/
('video.wmht.org', 'WMHT Educational Telecommunications (WMHT)'), # http://www.wmht.org/home/
('video.deltabroadcasting.org', 'Q-TV (WDCQ)'), # http://www.deltabroadcasting.org
('video.dptv.org', 'WTVS Detroit Public TV (WTVS)'), # http://www.dptv.org/
('video.wcmu.org', 'CMU Public Television (WCMU)'), # http://www.wcmu.org
('video.wkar.org', 'WKAR-TV (WKAR)'), # http://wkar.org/
('wnmuvideo.nmu.edu', 'WNMU-TV Public TV 13 (WNMU)'), # http://wnmutv.nmu.edu
('video.wdse.org', 'WDSE - WRPT (WDSE)'), # http://www.wdse.org/
('video.wgte.org', 'WGTE TV (WGTE)'), # http://www.wgte.org
('video.lptv.org', 'Lakeland Public Television (KAWE)'), # http://www.lakelandptv.org
# ('prairiepublic.org', 'PRAIRIE PUBLIC (KFME)'), # http://www.prairiepublic.org/
('video.kmos.org', 'KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS)'), # http://www.kmos.org/
('watch.montanapbs.org', 'MontanaPBS (KUSM)'), # http://montanapbs.org
('video.krwg.org', 'KRWG/Channel 22 (KRWG)'), # http://www.krwg.org
('video.kacvtv.org', 'KACV (KACV)'), # http://www.panhandlepbs.org/home/
('video.kcostv.org', 'KCOS/Channel 13 (KCOS)'), # www.kcostv.org
('video.wcny.org', 'WCNY/Channel 24 (WCNY)'), # http://www.wcny.org
('video.wned.org', 'WNED (WNED)'), # http://www.wned.org/
('watch.wpbstv.org', 'WPBS (WPBS)'), # http://www.wpbstv.org
('video.wskg.org', 'WSKG Public TV (WSKG)'), # http://wskg.org
('video.wxxi.org', 'WXXI (WXXI)'), # http://wxxi.org
('video.wpsu.org', 'WPSU (WPSU)'), # http://www.wpsu.org
# ('wqln.org', 'WQLN/Channel 54 (WQLN)'), # http://www.wqln.org
('on-demand.wvia.org', 'WVIA Public Media Studios (WVIA)'), # http://www.wvia.org/
('video.wtvi.org', 'WTVI (WTVI)'), # http://www.wtvi.org/
# ('whro.org', 'WHRO (WHRO)'), # http://whro.org
('video.westernreservepublicmedia.org', 'Western Reserve PBS (WNEO)'), # http://www.WesternReservePublicMedia.org/
('video.ideastream.org', 'WVIZ/PBS ideastream (WVIZ)'), # http://www.wviz.org/
('video.kcts9.org', 'KCTS 9 (KCTS)'), # http://kcts9.org/
('video.basinpbs.org', 'Basin PBS (KPBT)'), # http://www.basinpbs.org
('video.houstonpbs.org', 'KUHT / Channel 8 (KUHT)'), # http://www.houstonpublicmedia.org/
# ('tamu.edu', 'KAMU - TV (KAMU)'), # http://KAMU.tamu.edu
# ('kedt.org', 'KEDT/Channel 16 (KEDT)'), # http://www.kedt.org
('video.klrn.org', 'KLRN (KLRN)'), # http://www.klrn.org
('video.klru.tv', 'KLRU (KLRU)'), # http://www.klru.org
# ('kmbh.org', 'KMBH-TV (KMBH)'), # http://www.kmbh.org
# ('knct.org', 'KNCT (KNCT)'), # http://www.knct.org
# ('ktxt.org', 'KTTZ-TV (KTXT)'), # http://www.ktxt.org
('video.wtjx.org', 'WTJX Channel 12 (WTJX)'), # http://www.wtjx.org/
('video.ideastations.org', 'WCVE PBS (WCVE)'), # http://ideastations.org/
('video.kbtc.org', 'KBTC Public Television (KBTC)'), # http://kbtc.org
)
IE_NAME = 'pbs'
IE_DESC = 'Public Broadcasting Service (PBS) and member stations: %s' % ', '.join(list(zip(*_STATIONS))[1])
_VALID_URL = r'''(?x)https?://
(?:
# Direct video URL
(?:%s)/(?:viralplayer|video)/(?P<id>[0-9]+)/? |
# Article with embedded player (or direct video)
(?:www\.)?pbs\.org/(?:[^/]+/){2,5}(?P<presumptive_id>[^/]+?)(?:\.html)?/?(?:$|[?\#]) |
# Player
(?:video|player)\.pbs\.org/(?:widget/)?partnerplayer/(?P<player_id>[^/]+)/
)
''' % '|'.join(re.escape(p) for p in list(zip(*_STATIONS))[0])
_TESTS = [
{
'url': 'http://www.pbs.org/tpt/constitution-usa-peter-sagal/watch/a-more-perfect-union/',
'md5': 'ce1888486f0908d555a8093cac9a7362',
'info_dict': {
'id': '2365006249',
'ext': 'mp4',
'title': 'Constitution USA with Peter Sagal - A More Perfect Union',
'description': 'md5:ba0c207295339c8d6eced00b7c363c6a',
'duration': 3190,
},
'params': {
'skip_download': True, # requires ffmpeg
},
},
{
'url': 'http://www.pbs.org/wgbh/pages/frontline/losing-iraq/',
'md5': '143c98aa54a346738a3d78f54c925321',
'info_dict': {
'id': '2365297690',
'ext': 'mp4',
'title': 'FRONTLINE - Losing Iraq',
'description': 'md5:f5bfbefadf421e8bb8647602011caf8e',
'duration': 5050,
},
'params': {
'skip_download': True, # requires ffmpeg
}
},
{
'url': 'http://www.pbs.org/newshour/bb/education-jan-june12-cyberschools_02-23/',
'md5': 'b19856d7f5351b17a5ab1dc6a64be633',
'info_dict': {
'id': '2201174722',
'ext': 'mp4',
'title': 'PBS NewsHour - Cyber Schools Gain Popularity, but Quality Questions Persist',
'description': 'md5:5871c15cba347c1b3d28ac47a73c7c28',
'duration': 801,
},
},
{
'url': 'http://www.pbs.org/wnet/gperf/dudamel-conducts-verdi-requiem-hollywood-bowl-full-episode/3374/',
'md5': 'c62859342be2a0358d6c9eb306595978',
'info_dict': {
'id': '2365297708',
'ext': 'mp4',
'description': 'md5:68d87ef760660eb564455eb30ca464fe',
'title': 'Great Performances - Dudamel Conducts Verdi Requiem at the Hollywood Bowl - Full',
'duration': 6559,
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True, # requires ffmpeg
},
},
{
'url': 'http://www.pbs.org/wgbh/nova/earth/killer-typhoon.html',
'md5': '908f3e5473a693b266b84e25e1cf9703',
'info_dict': {
'id': '2365160389',
'display_id': 'killer-typhoon',
'ext': 'mp4',
'description': 'md5:c741d14e979fc53228c575894094f157',
'title': 'NOVA - Killer Typhoon',
'duration': 3172,
'thumbnail': 're:^https?://.*\.jpg$',
'upload_date': '20140122',
'age_limit': 10,
},
'params': {
'skip_download': True, # requires ffmpeg
},
},
{
'url': 'http://www.pbs.org/wgbh/pages/frontline/united-states-of-secrets/',
'info_dict': {
'id': 'united-states-of-secrets',
},
'playlist_count': 2,
},
{
'url': 'http://www.pbs.org/wgbh/americanexperience/films/death/player/',
'info_dict': {
'id': '2276541483',
'display_id': 'player',
'ext': 'mp4',
'title': 'American Experience - Death and the Civil War, Chapter 1',
'description': 'American Experience, TV’s most-watched history series, brings to life the compelling stories from our past that inform our understanding of the world today.',
'duration': 682,
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True, # requires ffmpeg
},
},
{
'url': 'http://video.pbs.org/video/2365367186/',
'info_dict': {
'id': '2365367186',
'display_id': '2365367186',
'ext': 'mp4',
'title': 'To Catch A Comet - Full Episode',
'description': 'On November 12, 2014, billions of kilometers from Earth, spacecraft orbiter Rosetta and lander Philae did what no other had dared to attempt \u2014 land on the volatile surface of a comet as it zooms around the sun at 67,000 km/hr. The European Space Agency hopes this mission can help peer into our past and unlock secrets of our origins.',
'duration': 3342,
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True, # requires ffmpeg
},
'skip': 'Expired',
},
{
# Video embedded in iframe containing angle brackets as attribute's value (e.g.
# "<iframe style='position: absolute;<br />\ntop: 0; left: 0;' ...", see
# https://github.com/rg3/youtube-dl/issues/7059)
'url': 'http://www.pbs.org/food/features/a-chefs-life-season-3-episode-5-prickly-business/',
'info_dict': {
'id': '2365546844',
'display_id': 'a-chefs-life-season-3-episode-5-prickly-business',
'ext': 'mp4',
'title': "A Chef's Life - Season 3, Ep. 5: Prickly Business",
'description': 'md5:61db2ddf27c9912f09c241014b118ed1',
'duration': 1480,
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True, # requires ffmpeg
},
},
{
# Frontline video embedded via flp2012.js
'url': 'http://www.pbs.org/wgbh/pages/frontline/the-atomic-artists',
'info_dict': {
'id': '2070868960',
'display_id': 'the-atomic-artists',
'ext': 'mp4',
'title': 'FRONTLINE - The Atomic Artists',
'description': 'md5:f5bfbefadf421e8bb8647602011caf8e',
'duration': 723,
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True, # requires ffmpeg
},
},
{
'url': 'http://player.pbs.org/widget/partnerplayer/2365297708/?start=0&end=0&chapterbar=false&endscreen=false&topbar=true',
'only_matching': True,
},
{
'url': 'http://watch.knpb.org/video/2365616055/',
'only_matching': True,
}
]
_ERRORS = {
101: 'We\'re sorry, but this video is not yet available.',
        403: 'We\'re sorry, but this video is not available in your region due to rights restrictions.',
404: 'We are experiencing technical difficulties that are preventing us from playing the video at this time. Please check back again soon.',
410: 'This video has expired and is no longer available for online streaming.',
}
def _extract_webpage(self, url):
mobj = re.match(self._VALID_URL, url)
presumptive_id = mobj.group('presumptive_id')
display_id = presumptive_id
if presumptive_id:
webpage = self._download_webpage(url, display_id)
upload_date = unified_strdate(self._search_regex(
r'<input type="hidden" id="air_date_[0-9]+" value="([^"]+)"',
webpage, 'upload date', default=None))
# tabbed frontline videos
tabbed_videos = re.findall(
r'<div[^>]+class="videotab[^"]*"[^>]+vid="(\d+)"', webpage)
if tabbed_videos:
return tabbed_videos, presumptive_id, upload_date
MEDIA_ID_REGEXES = [
r"div\s*:\s*'videoembed'\s*,\s*mediaid\s*:\s*'(\d+)'", # frontline video embed
r'class="coveplayerid">([^<]+)<', # coveplayer
r'<section[^>]+data-coveid="(\d+)"', # coveplayer from http://www.pbs.org/wgbh/frontline/film/real-csi/
r'<input type="hidden" id="pbs_video_id_[0-9]+" value="([0-9]+)"/>', # jwplayer
]
media_id = self._search_regex(
MEDIA_ID_REGEXES, webpage, 'media ID', fatal=False, default=None)
if media_id:
return media_id, presumptive_id, upload_date
        # Frontline video embedded via flp
video_id = self._search_regex(
r'videoid\s*:\s*"([\d+a-z]{7,})"', webpage, 'videoid', default=None)
if video_id:
# pkg_id calculation is reverse engineered from
# http://www.pbs.org/wgbh/pages/frontline/js/flp2012.js
prg_id = self._search_regex(
r'videoid\s*:\s*"([\d+a-z]{7,})"', webpage, 'videoid')[7:]
if 'q' in prg_id:
prg_id = prg_id.split('q')[1]
prg_id = int(prg_id, 16)
getdir = self._download_json(
'http://www.pbs.org/wgbh/pages/frontline/.json/getdir/getdir%d.json' % prg_id,
presumptive_id, 'Downloading getdir JSON',
transform_source=strip_jsonp)
return getdir['mid'], presumptive_id, upload_date
for iframe in re.findall(r'(?s)<iframe(.+?)></iframe>', webpage):
url = self._search_regex(
r'src=(["\'])(?P<url>.+?partnerplayer.+?)\1', iframe,
'player URL', default=None, group='url')
if url:
break
mobj = re.match(self._VALID_URL, url)
player_id = mobj.group('player_id')
if not display_id:
display_id = player_id
if player_id:
player_page = self._download_webpage(
url, display_id, note='Downloading player page',
errnote='Could not download player page')
video_id = self._search_regex(
r'<div\s+id="video_([0-9]+)"', player_page, 'video ID')
else:
video_id = mobj.group('id')
display_id = video_id
return video_id, display_id, None
def _real_extract(self, url):
video_id, display_id, upload_date = self._extract_webpage(url)
if isinstance(video_id, list):
entries = [self.url_result(
'http://video.pbs.org/video/%s' % vid_id, 'PBS', vid_id)
for vid_id in video_id]
return self.playlist_result(entries, display_id)
info = self._download_json(
'http://player.pbs.org/videoInfo/%s?format=json&type=partner' % video_id,
display_id)
formats = []
for encoding_name in ('recommended_encoding', 'alternate_encoding'):
redirect = info.get(encoding_name)
if not redirect:
continue
redirect_url = redirect.get('url')
if not redirect_url:
continue
redirect_info = self._download_json(
redirect_url + '?format=json', display_id,
'Downloading %s video url info' % encoding_name)
if redirect_info['status'] == 'error':
raise ExtractorError(
'%s said: %s' % (
self.IE_NAME,
self._ERRORS.get(redirect_info['http_code'], redirect_info['message'])),
expected=True)
format_url = redirect_info.get('url')
if not format_url:
continue
if determine_ext(format_url) == 'm3u8':
formats.extend(self._extract_m3u8_formats(
format_url, display_id, 'mp4', preference=1, m3u8_id='hls'))
else:
formats.append({
'url': format_url,
'format_id': redirect.get('eeid'),
})
self._sort_formats(formats)
rating_str = info.get('rating')
if rating_str is not None:
rating_str = rating_str.rpartition('-')[2]
age_limit = US_RATINGS.get(rating_str)
subtitles = {}
closed_captions_url = info.get('closed_captions_url')
if closed_captions_url:
subtitles['en'] = [{
'ext': 'ttml',
'url': closed_captions_url,
}]
# info['title'] is often incomplete (e.g. 'Full Episode', 'Episode 5', etc)
# Try turning it to 'program - title' naming scheme if possible
alt_title = info.get('program', {}).get('title')
if alt_title:
info['title'] = alt_title + ' - ' + re.sub(r'^' + alt_title + '[\s\-:]+', '', info['title'])
return {
'id': video_id,
'display_id': display_id,
'title': info['title'],
'description': info['program'].get('description'),
'thumbnail': info.get('image_url'),
'duration': int_or_none(info.get('duration')),
'age_limit': age_limit,
'upload_date': upload_date,
'formats': formats,
'subtitles': subtitles,
}
| null |
youtube_dl/extractor/pbs.py
|
pbs.py
|
py
| 26,698 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "common.InfoExtractor",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "re.escape",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "utils.unified_strdate",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "utils.strip_jsonp",
"line_number": 399,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "utils.ExtractorError",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "utils.determine_ext",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "utils.US_RATINGS.get",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "utils.US_RATINGS",
"line_number": 476,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 490,
"usage_type": "call"
},
{
"api_name": "utils.int_or_none",
"line_number": 498,
"usage_type": "call"
}
] |
492905381
|
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
from scipy import integrate
import numpy as np
import time
from torchdiffeq import odeint_adjoint as odeint
import matplotlib.pyplot as plt
#torch.manual_seed(1) # reproducible
class Simple2d(nn.Module):
def forward(self, t, x):
return torch.mm(x**3, true_A)
class LotkaVolterra(nn.Module):
def __init__(self, theta):
super(LotkaVolterra,self).__init__()
self.theta=theta
def forward(self, t, x):
# a = 1.
# b = 0.1
# c = 1.5
# d = 0.75
a, b, c, d = self.theta[0], self.theta[1], self.theta[2], self.theta[3]
return torch.tensor([ a*x[0] - b*x[0]*x[1],
-c*x[1] + d*b*x[0]*x[1] ])
class FHN(nn.Module):
def __init__(self, theta):
super(FHN, self).__init__()
self.theta = theta
def forward(self, t, x):
# theta1 = 0.2
# theta2 = 0.2
# theta3 = 3.0
theta1, theta2, theta3 = self.theta[0], self.theta[1], self.theta[2]
return torch.tensor([theta3 * (x[0] - x[0]**3 / 3.0 + x[1]),
- 1.0 / theta3 * (x[0] - theta1 + theta2 * x[1])])
class Lorenz63(nn.Module):
def __init__(self, theta):
super(Lorenz63, self).__init__()
self.theta = theta
def forward(self, t, x):
theta = self.theta
return torch.tensor([theta[0] * (x[1] - x[0]),
theta[1] * x[0] - x[1] - x[0] * x[2],
x[0] * x[1] - theta[2] * x[2]
])
class Lorenz96(nn.Module):
def __init__(self, theta):
super(Lorenz96, self).__init__()
self.theta = theta
def forward(self, t, x):
theta = self.theta
f = []
for n in range(100):
f.append((x[(n+1) % 100] - x[(n+98) % 100]) * x[(n+99) % 100]
- x[n % 100] + theta)
return torch.tensor(f)
class ChemicalReactionSimple(nn.Module):
def __init__(self, theta):
super(ChemicalReactionSimple, self).__init__()
self.theta = theta
def forward(self, t, x):
theta = self.theta
firstODE = theta[0] - theta[1]*x[0]*x[1]**2
secondODE = theta[1]*x[0]*x[1]**2 - theta[2]*x[1]
return torch.tensor([firstODE, secondODE])
class Chemostat(nn.Module):
def __init__(self, nSpecies, D, S0, theta):
super(Chemostat, self).__init__()
self.nSpecies = nSpecies
self.D = D
self.S0 = S0
self.theta = theta
def forward(self, t, x):
theta = self.theta
assert theta.size == 3*self.nSpecies
# assert x_in.size == 1 + self.nSpecies
if type(x) is np.ndarray:
x = torch.from_numpy(x.T)
Cetas = torch.from_numpy(theta[:self.nSpecies])
Vs = torch.from_numpy(theta[self.nSpecies:2*self.nSpecies])
Ks = torch.from_numpy(theta[-self.nSpecies:])
xFis = self._MMTerm(torch.ones_like(Vs)*x[-1], Vs, Ks)*x[:self.nSpecies]
bacDerivs = -self.D*x[:self.nSpecies] \
+ xFis*Cetas
subDeriv = self.D*(self.S0 - x[-1]) \
- torch.sum(xFis)
return torch.from_numpy(np.squeeze(np.concatenate([bacDerivs.reshape([1, -1]),
np.asarray(subDeriv).reshape([1, 1])],
axis=1)))
def _MMTerm(self, S, VMax, KM):
"""
Describes the uptake of nutrients by the bacterium using a
Michaelis-Menten term.
Parameters
----------
S: scalar or vector of size n
            current substrate concentration
VMax: scalar or vector of size n
Michaelis-Menten multiplicative constant
KM: scalar or vector of size n
Michaelis-Menten denominator constant
"""
VMax = VMax
S = S
KM = KM
return VMax*S/(S+KM)
class Clock(nn.Module):
def __init__(self, theta):
"""
Initializes the clock model as given by "Methods and Models in
Mathematical Biology" by Müller and Kuttler, section 5.2.7, using n=3.
"""
super(Clock,self).__init__()
self.theta = theta
def forward(self, t, x):
# TODO: adapt
"""
Returns the vector of state derivatives of the clock system.
Parameters
----------
x: vector of length 7
states of the clock system.
"""
theta = np.asarray(self.theta)
assert theta.size == 17
# assert x.size() == 7
a1 = theta[0]
a3 = theta[1]
a4 = theta[2]
gammaC = theta[3]
gammaE = theta[4]
gamma3 = theta[5]
gamma4 = theta[6]
d1 = theta[7]
d2 = theta[8]
pi1P = theta[9]
pi1M = theta[10]
pi2P = theta[11]
pi2M = theta[12]
pi3P = theta[13]
pi3M = theta[14]
b1 = theta[15]
k1 = theta[16]
xc = x[0]
xe = x[1]
l = x[2]
r = x[3]
y1 = x[4]
y2 = x[5]
y3 = x[6]
xcDeriv = a1*l - (gammaC + d1)*xc + d2*xe - pi1P*r*xc + pi1M*y1
xeDeriv = d1*xc - d2*xe - gammaE*xe
lDeriv = a3 - gamma3*l + b1*y3/(1+b1/k1*y3)
rDeriv = a4 - gamma4*r -pi1P*r*xc + pi1M*y1
y1Deriv = pi1P*r*xc - pi1M*y1 - 2*pi2P*y1**2 + 2*pi2M*y2 - pi3P*y1*y2 + pi3M*y3
y2Deriv = pi2P*y1**2 - pi2M*y2 + pi3M*y3 - pi3P*y1*y2
y3Deriv = pi3P*y1*y2 - pi3M*y3
return torch.tensor([xcDeriv,
xeDeriv,
lDeriv,
rDeriv,
y1Deriv,
y2Deriv,
y3Deriv])
class ProteinTransduction(nn.Module):
def __init__(self, theta):
super(ProteinTransduction, self).__init__()
self.theta = theta
def forward(self, t, x):
theta = np.asarray(self.theta)
f = [- theta[0] * x[0] - theta[1] * x[0] * x[2] + theta[2] * x[3],
theta[0] * x[0],
- theta[1] * x[0] * x[2] + theta[2] * x[3] +
theta[4] * x[4] / (theta[5] + x[4]),
theta[1] * x[0] * x[2] - theta[2] * x[3] - theta[3] * x[3],
theta[3] * x[3] - theta[4] * x[4] / (theta[5] + x[4])]
return torch.tensor(f)
def generate_data(datfunc, true_x0,t,method=None):
with torch.no_grad():
true_x = odeint(datfunc, true_x0, t, method=method)
return true_x
def get_batch(data_size, batch_time, batch_size,true_x, true_der, t):
s = torch.from_numpy(np.random.choice(np.arange(data_size - batch_time, dtype=np.int64), batch_size, replace=False))
batch_x0 = true_x[s] # (Batch_Size, Dimension)
batch_t = t[:batch_time] # (Time_step)
batch_x = torch.stack([true_x[s + i] for i in range(batch_time)], dim=0) # (Time_step, Batch_size, Dimension)
batch_der = torch.stack([true_der[s + i] for i in range(batch_time)], dim=0)
return batch_x0, batch_t, batch_x, batch_der
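# A minimal usage sketch (the system, time grid, and batch sizes below are
# illustrative assumptions, not values taken from this repository):
#
#   theta = [1.0, 0.1, 1.5, 0.75]
#   func = LotkaVolterra(theta)
#   t = torch.linspace(0., 25., 1000)
#   true_x0 = torch.tensor([10., 5.])
#   true_x = generate_data(func, true_x0, t, method='dopri5')
#   true_der = torch.stack([func(0., x) for x in true_x])
#   batch_x0, batch_t, batch_x, batch_der = get_batch(
#       data_size=1000, batch_time=10, batch_size=20,
#       true_x=true_x, true_der=true_der, t=t)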
| null |
Random_Search/dataset_def.py
|
dataset_def.py
|
py
| 7,293 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.nn.Module",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.mm",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "torch.from_numpy",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "torch.ones_like",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.squeeze",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "torchdiffeq.odeint_adjoint",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "numpy.int64",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "torch.stack",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 255,
"usage_type": "call"
}
] |
515808778
|
"""PyWeek Contest 2020 by GUVI"""
"""PySnake Game Created by J.KALIRAJ"""
"""GUVI ID : [email protected]"""
import pygame
import random
pygame.init()
gray= (180,180,180)
gray2= (200,200,200)
white= (255,255,255)
black= (0,0,0)
red = (244,0,6)
green = (0,155,0)
display_width = 800
display_height = 600
block_size =20
appleThickness = 30
FPS = 10
img = pygame.image.load('Files/Snakehead.png')
appleimg= pygame.image.load('Files/Apple.png')
gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('PySnake 2020')
pygame.display.update()
pygame.display.set_icon(img)
clock = pygame.time.Clock()
smallfont= pygame.font.SysFont(None, 25)
medfont= pygame.font.SysFont(None, 50)
largefont= pygame.font.SysFont("comicsansms", 80)
def gameIntro():
intro = True
introimg = pygame.image.load('Files/introimg.png')
gameDisplay.blit(introimg, (0,0))
pygame.mixer.music.set_volume(0.1)
menu_song = pygame.mixer.music.load("Files/menu.ogg")
pygame.mixer.music.play(-1)
while intro:
#gameDisplay.fill(gray)
#message_to_screen("PyWeek Contest 2020", red, -250, "medium")
#message_to_screen("Welcome to PySnake", green, -150, "large")
#message_to_screen("Press C to Continue", green, 150, "medium")
#message_to_screen("Press H to Help", green, 180, "medium")
#message_to_screen("Press Q to Quit", red, 210, "medium")
for event in pygame.event.get():
if event.type == pygame.KEYDOWN :
if event.key == pygame.K_e:
pygame.quit()
quit()
elif event.key == pygame.K_p:
intro = False
pygame.mixer.music.stop()
ready = pygame.mixer.Sound('Files/getready.ogg')
ready.play()
gameDisplay.fill(black)
message_to_screen("Get Ready!", green, 50, "medium")
pygame.display.update()
pygame.time.wait(3000)
gameLoop()
elif event.key == pygame.K_h:
helper()
if event.type == pygame.QUIT :
pygame.quit
quit()
pygame.display.update()
clock.tick(5)
def draw_text(surf, text, size, x, y):
font_name = pygame.font.match_font('arial')
font = pygame.font.Font(font_name, size)
text_surface = font.render(text, True, red)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
surf.blit(text_surface, text_rect)
def helper():
helpmenu = True
gameDisplay.fill(white)
message_to_screen("GAMEPLAY", red, -210, "medium")
message_to_screen("* The objective of the game is to eat red apples", green, -170)
message_to_screen("* The more apples you eat, the longer you become", green, -140)
message_to_screen("* You will lose if you collide with yourself or the edges", green, -110)
message_to_screen("CONTROLS", red, -20, "medium")
message_to_screen("* Use Arrow keys for Movement/Direction", green, 10)
message_to_screen("* Press ESCAPE key for Pause", green, 40)
message_to_screen("* Press C to Continue, Q to Quit", green, 70)
message_to_screen("Press B to Back",black, 250, "medium")
draw_text(gameDisplay,"Credits : PyWeek Contest 2020 GUVI TEAM & NEC COLLAGE KOVILPATTI" , 14, 400, 580)
pygame.display.update()
while helpmenu:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN :
if event.key == pygame.K_b:
gameIntro()
intro = True
if event.type == pygame.QUIT :
pygame.quit
quit()
def pause():
paused = True
message_to_screen("Paused", black, -50, "large")
message_to_screen("Press C to continue or Q to quit ", black, 20)
pygame.display.update()
while paused:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN :
if event.key == pygame.K_q:
pygame.quit()
quit()
elif event.key == pygame.K_c:
paused = False
if event.type == pygame.QUIT :
pygame.quit
quit()
clock.tick(5)
def score(score):
img1 = pygame.image.load('Files/score.jpg')
gameDisplay.blit(img1, (10,-10))
draw_text(gameDisplay,str(score) , 30, 180, 5)
global finalscore
finalscore=str(score)
myscore=int(score)
global level
level = 1
if myscore >= 50:
level = level +1
if myscore >= 100:
level = level +1
if myscore >= 200:
level = level +1
if myscore >= 500:
level = level +1
img1 = pygame.image.load('Files/level.jpg')
gameDisplay.blit(img1, (600,-5))
draw_text(gameDisplay,str(level) , 30, 750, 5)
def apple(applex, appley):
gameDisplay.blit(appleimg, (applex, appley))
def randAppleGen():
randapple_y = round(random.randrange(30,display_height - appleThickness))
randapple_x = round(random.randrange(30,display_width - appleThickness))
return randapple_x, randapple_y
def snake(snakeList, block_size):
if direction == 'right':
head= pygame.transform.rotate(img, 270)
if direction == 'left':
head= pygame.transform.rotate(img, 90)
if direction == 'up':
head= pygame.transform.rotate(img, 0)
if direction == 'down':
head= pygame.transform.rotate(img, 180)
gameDisplay.blit(head, (snakeList[-1][0], snakeList[-1][1]))
for XnY in snakeList[:-1] :
pygame.draw.rect(gameDisplay,green,[XnY[0],XnY[1],block_size,block_size])
def text_Objects(text, color, size):
if size == "small":
textSurface = smallfont.render(text, True, color)
elif size == "medium":
textSurface = medfont.render(text, True, color)
elif size == "large":
textSurface = largefont.render(text, True, color)
return textSurface, textSurface.get_rect()
def message_to_screen(msg, color,y_displace =0, size = "small"):
textSurf, textRect = text_Objects(msg, color, size)
textRect.center = (display_width/2), (display_height/2) + y_displace
gameDisplay.blit(textSurf,textRect)
def gameLoop():
menu_song = pygame.mixer.music.load("Files/menu.ogg")
pygame.mixer.music.play(-1)
global direction
global mode
direction = 'right'
gameExit= False
gameOver= False
snakeList = []
snakeLength = 1
growthperapple =1
snakeSpeed = block_size
lead_x = display_width/2
lead_y = display_height/2
lead_xchange= snakeSpeed
lead_ychange= 0
randapple_x, randapple_y = randAppleGen()
while not gameExit:
if gameOver == True:
#gameDisplay.fill(white)
pygame.mixer.music.pause()
ready = pygame.mixer.Sound('Files/gameover.wav')
ready.play()
img1 = pygame.image.load('Files/gameover.jpg')
gameDisplay.blit(img1, (0,0))
draw_text(gameDisplay,finalscore , 30, 430, 340)
pygame.display.update()
while gameOver == True:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN :
if event.key == pygame.K_q:
gameOver = False
gameExit = True
pygame.quit
quit()
elif event.key == pygame.K_c:
gameLoop()
if event.type == pygame.QUIT :
gameOver = False
gameExit = True
pygame.quit
quit()
for event in pygame.event.get():
#print(event)
if event.type == pygame.QUIT :
gameExit = True
if event.type == pygame.KEYDOWN :
if event.key == pygame.K_RIGHT:
lead_xchange = snakeSpeed
lead_ychange = 0
direction= 'right'
if event.key == pygame.K_LEFT:
lead_xchange = -snakeSpeed
lead_ychange = 0
direction= 'left'
if event.key == pygame.K_DOWN:
lead_ychange = snakeSpeed
lead_xchange = 0
direction= 'down'
if event.key == pygame.K_UP:
lead_ychange = -snakeSpeed
lead_xchange = 0
direction= 'up'
if event.key == pygame.K_ESCAPE:
pause()
if lead_x >= display_width or lead_x < 0 or lead_y >= display_height or lead_y < 0:
gameOver = True
lead_x += lead_xchange
lead_y += lead_ychange
if lead_x >= randapple_x and lead_x < randapple_x +appleThickness or lead_x + block_size > randapple_x and lead_x + block_size < randapple_x + appleThickness:
if lead_y >= randapple_y and lead_y <randapple_y +appleThickness or lead_y + block_size > randapple_y and lead_y + block_size < randapple_y + appleThickness:
randapple_x, randapple_y = randAppleGen()
snakeLength += growthperapple
gameDisplay.fill(white)
snakeHead = []
snakeHead.append(lead_x)
snakeHead.append(lead_y)
snakeList.append(snakeHead)
if len(snakeList) >(snakeLength):
del snakeList[0]
for eachSegment in snakeList[:-1]:
if eachSegment == snakeHead :
gameOver = True
apple(randapple_x,randapple_y)
snake(snakeList, block_size)
score((snakeLength-1)*10)
pygame.display.update()
clock.tick(FPS)
gameIntro()
pygame.quit
quit()
| null |
PySnake.py
|
PySnake.py
|
py
| 10,320 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pygame.init",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.image.load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_icon",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Clock",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.set_volume",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.load",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.play",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_e",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pygame.K_p",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.stop",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Sound",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.wait",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_h",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.match_font",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_b",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_q",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "pygame.K_c",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "random.randrange",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "pygame.transform.rotate",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.rotate",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.rotate",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.rotate",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.load",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.play",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.pause",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Sound",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 217,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 223,
"usage_type": "attribute"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_q",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 228,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_c",
"line_number": 230,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 240,
"usage_type": "attribute"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RIGHT",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_LEFT",
"line_number": 247,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_DOWN",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_UP",
"line_number": 255,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_ESCAPE",
"line_number": 259,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 289,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 294,
"usage_type": "attribute"
}
] |
167135986
|
#!/usr/bin/env python
import torch
import json
import pdb
import numpy as np
from torch.autograd import Variable
import os
import argparse
import datasets
import old_datasets
import models
import pickle
import time
import random
import monitoring
import training
import evaluations
#
def build_parser():
parser = argparse.ArgumentParser(description="")
### Hyperparameter options
parser.add_argument('--epoch', default=10, type=int, help='The number of epochs we want ot train the network.')
parser.add_argument('--seed', default=260389, type=int, help='Seed for random initialization and stuff.')
parser.add_argument('--batch-size', default=1, type=int, help="The batch size.")
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
### Dataset specific options
parser.add_argument('--data-dir', default='./data/', help='The folder contaning the dataset.')
parser.add_argument('--data-file', default='.', help='The data file with the dataset.')
parser.add_argument('--dataset', choices=['tcr','hla_tcr',
'binary_hla_tcr'], default='tcr', help='Which dataset to use.')
parser.add_argument('--tenth', default=0, type=int, help='test set only - fraction')
parser.add_argument('--transform', default=True,help='log10(exp+1)')
parser.add_argument('--nb-patient', default=5,type=int, help='nb of different patients')
parser.add_argument('--tcr-size', default=27,type=int, help='length of the TCR sequence')
parser.add_argument('--hla-size', default=34,type=int, help='length of the HLA sequence')
parser.add_argument('--nb-kmer', default=1000,type=int, help='nb of different kmers')
parser.add_argument('--cache', default=0, help='cache prefix for the dataset')
parser.add_argument('--nb-tcr-to-sample', default=10000,type=int, help='nb of TCR to sample')
# Model specific options
parser.add_argument('--tcr-conv-layers-sizes', default=[20,1,18], type=int, nargs='+', help='TCR-Conv net config.')
parser.add_argument('--hla-conv-layers-sizes', default=[20,1,25], type=int, nargs='+', help='HLA-Conv net config.')
parser.add_argument('--mlp-layers-size', default=[250, 75, 50, 25, 10], type=int, nargs='+', help='MLP config')
parser.add_argument('--emb_size', default=10, type=int, help='The size of the embeddings.')
parser.add_argument('--loss', choices=['NLL', 'MSE'], default = 'MSE', help='The cost function to use')
parser.add_argument('--weight-decay', default=0, type=float, help='Weight decay parameter.')
parser.add_argument('--model', choices=['RNN','TCRonly',
'allseq','allseq_bin'], default='TCRonly', help='Which model to use.')
parser.add_argument('--cpu', action='store_true', help='True if no gpu to be used')
parser.add_argument('--name', type=str, default=None, help="If we want to add a random str to the folder.")
parser.add_argument('--gpu-selection', type=int, default=0, help="gpu selection")
# Monitoring options
parser.add_argument('--plot-frequency', default=1, type=int, help='frequency (in epochs) at which to generate the training curve')
parser.add_argument('--load-folder', help='The folder where to load and restart the training.')
parser.add_argument('--save-dir', default='./testing123/', help='The folder where everything will be saved.')
return parser
def parse_args(argv):
if type(argv) == list or argv is None:
opt = build_parser().parse_args(argv)
else:
opt = argv
return opt
def main(argv=None):
opt = parse_args(argv)
# TODO: set the seed
seed = opt.seed
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.manual_seed(seed)
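# Only the torch RNGs are seeded here; Python's `random` module (used below for the
# cache prefix) and numpy are not seeded, so those values can still vary between runs.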
if opt.cache==0:
opt.cache = random.getrandbits(128)
exp_dir = opt.load_folder
if exp_dir is None: # we create a new folder if we don't load.
exp_dir = monitoring.create_experiment_folder(opt)
if opt.model == 'RNN':
print ('This model is deprecated - please use TCRonly from now on')
# creating the dataset
print ("Getting the dataset...")
if not 'cached_dataset' in os.listdir('.'):
os.mkdir('cached_dataset')
opt.dataset = 'binary_same'
tenth=opt.tenth
#dataset = datasets.get_dataset(opt,exp_dir,tenth=opt.tenth)
dataset = old_datasets.get_dataset(opt,exp_dir,test=True)
# Creating a model
print ("Getting the model...")
my_model, optimizer, epoch, opt = monitoring.load_checkpoint(exp_dir, opt, dataset.dataset.input_size(), )
criterion = torch.nn.MSELoss()
# Training optimizer and stuff
if opt.loss == 'NLL' or opt.model=='allseq_bin':
criterion = torch.nn.NLLLoss()
criterion = torch.nn.BCELoss()
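# Note: this BCELoss assignment replaces the NLLLoss created on the previous line,
# so BCELoss is the criterion actually used for the NLL / binary case.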
if not 'tcr_embs' in os.listdir(exp_dir):
if opt.model == 'TCRonly':
os.mkdir(f'{exp_dir}/tcr_embs/')
elif opt.model == 'allseq' or opt.model == 'allseq_bin':
os.mkdir(f'{exp_dir}/tcr_embs/')
os.mkdir(f'{exp_dir}/hla_embs/')
if not opt.cpu:
print ("Putting the model on gpu...")
my_model.cuda(opt.gpu_selection)
loss_dict = {}
loss_dict['train_losses'] = []
def estimate_batch_accuracy(y,yhat):
return np.sum([i==j for i,j in zip(y,yhat)])/y.shape[0]
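# Illustrative example (not part of the original script):
#   estimate_batch_accuracy(np.array([1, 0, 1]), np.array([1, 1, 1])) -> 2/3 (~0.667),
#   i.e. the fraction of positions where target and prediction agree.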
if opt.model == 'allseq' or opt.model == 'allseq_bin':
valid_list = np.load('/u/trofimov/Emerson/processed_data/valid_list.npy')
loss_dict['valid_losses'] = []
# The training.
print ("Getting the likelihood")
os.mkdir(f'{exp_dir}/sametenth{tenth}_preds_100/')
#monitoring and predictions
for t in range(1):
loss_dict = monitoring.update_loss_dict(loss_dict,start = True)
if opt.model == 'allseq_bin':
good = 0
for no_b, mini in enumerate(dataset):
if opt.model == 'TCRonly':
y_pred, my_model, targets = training.TCRonly_batch(mini,opt,my_model)
np.save(f'{exp_dir}/preds_100/likelihood_batch{no_b}.npy',y_pred.data.cpu().numpy())
if no_b % 5 == 0:
print (f"Doing epoch{t},examples{no_b}/{len(dataset)}")
# Saving the emb
elif opt.model == 'allseq':
inputs_k,inputs_h1, inputs_h2, inputs_h3, inputs_h4, targets = training.allseq_batch(mini,opt)
y_pred = my_model(inputs_k,inputs_h1, inputs_h2, inputs_h3,
inputs_h4).float()
np.save(f'{exp_dir}/preds_100/likelihood_batch{no_b}.npy',y_pred.data.cpu().numpy())
batch_number = dataset.dataset.data[no_b]
bn = batch_number[0]
np.save(f'{exp_dir}/preds_100/likelihood_batch{bn}.npy',y_pred.data.cpu().numpy())
if no_b % 5 == 0:
print (f"Doing epoch {t},examples{no_b}/{len(dataset)}")
elif opt.model == 'allseq_bin':
inputs_k, inputs_h1, inputs_h2, inputs_h3, inputs_h4, targets = training.binallseq_batch(mini,opt)
y_pred = my_model(inputs_k,inputs_h1, inputs_h2, inputs_h3,
inputs_h4).float()
batch_number = dataset.dataset.data[no_b]
bn = batch_number[0]
np.save(f'{exp_dir}/sametenth{tenth}_preds_100/likelihood_batch{bn}.npy',y_pred.data.cpu().numpy())
if no_b % 5 == 0:
print (f"Doing epoch {t},examples{no_b}/{len(dataset)}")
if __name__ == '__main__':
main()
| null |
predict_likelihood_same.py
|
predict_likelihood_same.py
|
py
| 7,705 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.cuda.manual_seed",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.manual_seed_all",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "torch.manual_seed",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "random.getrandbits",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "monitoring.create_experiment_folder",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "old_datasets.get_dataset",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "monitoring.load_checkpoint",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.NLLLoss",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.BCELoss",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "monitoring.update_loss_dict",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "training.TCRonly_batch",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "training.allseq_batch",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "training.binallseq_batch",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 186,
"usage_type": "call"
}
] |
433001737
|
from typing import List
def merge(self, intervals: List[List[int]]) -> List[List[int]]:
# sort the intervals by their start value
intervals.sort(key=lambda x: x[0])
# initialize the result with the first interval
res = [intervals[0]]
# loop through the array except the first interval
for start, end in intervals[1:]:
# end value of the last interval already in res
lastEnd = res[-1][1]
# if the new interval overlaps the last one, merge them
if start <= lastEnd:
res[-1][1] = max(lastEnd, end)
else:
# otherwise append it as a new interval
res.append([start, end])
return res
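# A minimal usage sketch (not in the original file); the function is written
# LeetCode-style with an unused `self` parameter, so it can be exercised directly:
#   intervals = [[1, 3], [2, 6], [8, 10], [15, 18]]
#   merge(None, intervals)  # -> [[1, 6], [8, 10], [15, 18]]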
| null |
leetcode/marge_intervals.py
|
marge_intervals.py
|
py
| 561 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "typing.List",
"line_number": 4,
"usage_type": "name"
}
] |
71120126
|
#!/usr/bin/python
"""Plot order parameter series for all runs and reps.
Plots number of bound staples, number of bound domains, number of misbound
domains, and number of stacked pairs.
"""
import argparse
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import origamipy.plot as plot
def main():
args = parse_args()
skip = 1
out_filebase = '{}/{}-{}_timeseries'.format(args.output_dir,
args.system, args.vari)
tags = ['numstaples', 'numfulldomains', 'nummisdomains', 'numstackedpairs']
labels = [
'Bound staples',
'Bound domains',
'Misbound domains',
'Stacked pairs']
figsize = (plot.cm_to_inches(18), plot.cm_to_inches(36))
plot.set_default_appearance()
f, axes = plt.subplots(3, 1, figsize=figsize, dpi=300)
for rep in range(args.reps):
ax = axes[rep]
ax.set_xlabel('Walltime / s')
timeseries = {}
times = []
for tag in tags:
timeseries[tag] = []
for run in range(args.runs):
filebase = '{}/{}-{}_run-{}_rep-{}-{}'.format(args.input_dir,
args.system, args.vari, run, rep, args.temp)
ops_filename = '{}.ops'.format(filebase)
ops = read_ops_from_file(ops_filename, tags, skip)
times_filename = '{}.times'.format(filebase)
new_times = np.loadtxt(times_filename, skiprows=1)[::skip, 1]
if run != 0:
new_times += times[-1]
times.extend(new_times.tolist())
for tag in tags:
timeseries[tag].extend(ops[tag])
# Plot timeseries
for i, tag in enumerate(tags):
ax.plot(times, timeseries[tag], marker=None, label=labels[i],
color='C{}'.format(i), zorder=4-i)
# Plot expected value
for i, tag in enumerate(tags):
if args.assembled_values[i] != 0:
ax.axhline(args.assembled_values[i], linestyle='--', color='C{}'.format(i))
# Plot legend
ax = axes[0]
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, frameon=False, loc='center',
bbox_to_anchor=(0.7, 0.25))
plt.tight_layout(pad=0.5, h_pad=0, w_pad=0)
f.savefig('{}.pdf'.format(out_filebase), transparent=True)
f.savefig('{}.png'.format(out_filebase), transparent=True)
def read_ops_from_file(filename, tags, skip):
"""Read specified order parameters from file
Returns a dictionary of tags to values.
"""
with open(filename) as inp:
header = inp.readline().split(', ')
all_ops = np.loadtxt(filename, skiprows=1, dtype=int)[::skip]
ops = {}
for i, tag in enumerate(header):
if tag in tags:
ops[tag] = all_ops[:, i]
return ops
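# Illustrative call (file name and header are assumptions, not from the original):
# for a 'run.ops' file whose header line is "step, numstaples, numfulldomains",
#   read_ops_from_file('run.ops', ['numstaples'], skip=1)
# returns {'numstaples': <the integer column for that tag>}.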
def parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'input_dir',
type=str,
help='Directory of inputs')
parser.add_argument(
'output_dir',
type=str,
help='Output directory')
parser.add_argument(
'system',
type=str,
help='System')
parser.add_argument(
'vari',
type=str,
help='Simulation variant')
parser.add_argument(
'temp',
type=str,
help='Temperature to plot')
parser.add_argument(
'runs',
type=int,
help='Number of runs')
parser.add_argument(
'reps',
type=int,
help='Number of reps')
parser.add_argument(
'--assembled_values',
nargs='+',
type=int,
help='Bound staples bound domains misbound domains '
'fully stacked pairs')
return parser.parse_args()
if __name__ == '__main__':
main()
| null |
scripts/plotting/plot_ops_series.py
|
plot_ops_series.py
|
py
| 4,055 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.use",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "origamipy.plot.cm_to_inches",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "origamipy.plot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "origamipy.plot.set_default_appearance",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "origamipy.plot",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "numpy.loadtxt",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "numpy.loadtxt",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "argparse.RawDescriptionHelpFormatter",
"line_number": 100,
"usage_type": "attribute"
}
] |
410752860
|
# Template for importing toy datasets for algo exploration and testing
# Load sklearn's datasets
from sklearn import datasets
# To load a sample dataset below, uncomment the desired dataset
# Comment out the datasets that are not being tested or used
digits = datasets.load_digits() #1,797 images of handwritten digits, good for teaching image classification
boston = datasets.load_boston() #503 Boston housing prices, good for exploring regression algos
iris = datasets.load_iris() #150 measurements of Iris flowers, good for exploring classification algos
# Create features matrix by replacing [dataset] with desired dataset from load above
features = [dataset].data
# Create target vector by replacing [dataset] with desired dataset from load above
target = [dataset].target
# View first observation
features[0]
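# Illustrative filled-in sketch (assumes the iris dataset loaded above is the one kept):
#   features = iris.data
#   target = iris.target
#   features[0]  # array([5.1, 3.5, 1.4, 0.2])
# Note: datasets.load_boston() has been removed from recent scikit-learn releases,
# so that line only works on older versions.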
| null |
DataLoad/SampleDatasetLoad.py
|
SampleDatasetLoad.py
|
py
| 801 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sklearn.datasets.load_digits",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "sklearn.datasets.load_boston",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "sklearn.datasets.load_iris",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets",
"line_number": 10,
"usage_type": "name"
}
] |
486242495
|
from flask import render_template
from flask import Blueprint
from flask import url_for
from flask import Flask
from flask import request, Response
from flask import jsonify
import gzip
import os
import json
import random
import unicodedata
from settings import *
main = Blueprint('main', __name__, template_folder='templates', static_folder='static', static_url_path="/static")
@main.route('/', defaults={'path': ''})
@main.route('/<path:path>')
def index(path):
return render_template('index.html')
@main.route('/login', methods=['GET', 'POST'])
def login():
id = request.args.get('id')
if request.method == 'GET':
if check(id):
return json.dumps({ "data": 'ok' }), 200
else:
error = 'Invalid username'
return json.dumps({ "error": error }), 500
def check(username):
with open(settings_path+username_path) as f:
return username+'\n' in f
@main.route('/getentry', methods=['GET','POST'])
def getentry():
id = request.args.get('id')
if not check(id):
error = 'Invalid username'
return json.dumps({ "error": error }), 500
num = int(request.args.get('num'))
if request.method == 'GET':
i = random.randint(0,1)
file_name = getfilename(i, id)
if file_name == 'done':
print(getValidLabelNum(id))
valid_num, valid_rate, account_displayed = getValidLabelNum(id)
return json.dumps({ "data": 'ok', "valid_label": valid_num, "valid_rate": valid_rate, "account_displayed": account_displayed}), 200
return jsonify(process(file_name, num))
return 'Post'
@main.route('/label', methods=['GET','POST'])
def label():
if (request.method == 'POST'):
data = json.loads(request.data)
if not check(data['id']):
return 'username error'
try:
with open(log_path+already_labeled+'_'+data['id']+'.csv', 'r+') as f:
for line in f:
if line == str(data['num_displayed'])+' '+str(data['twitter_account_id'])+'\n':
return 'You already labeled this account at this number of tweets displayed.\nYou can choose other num_displayed or next account.'
except:
with open(log_path+already_labeled+'_'+data['id']+'.csv', 'a') as f:
pass
if (data['isUser'] == 0):
with open(log_path+label_result_path, 'a') as f:
f.write(str(data['id'])+' '+str(data['num_displayed'])+' '+str(data['twitter_account_id'])+' 1 '+str(data['reason'])+'\n')
with open(log_path+already_labeled+'_'+data['id']+'.csv', 'a') as f:
f.write(str(data['num_displayed'])+' '+str(data['twitter_account_id'])+'\n')
# bot
return 'label \'bot\' received'
elif (data['isUser'] == 1):
with open(log_path+label_result_path, 'a') as f:
f.write(str(data['id'])+' '+str(data['num_displayed'])+' '+str(data['twitter_account_id'])+' 0 '+ str(data['reason']) +'\n')
with open(log_path+already_labeled+'_'+data['id']+'.csv', 'a') as f:
f.write(str(data['num_displayed'])+' '+str(data['twitter_account_id'])+'\n')
return 'label \'not bot\' received'
else:
return 'wrong input'
# get random file name
def getfilename(isbot, id):
if isbot:
tmp = data_path + bot_path
else:
tmp = data_path + notbot_path
filelist = os.listdir(tmp)
n = 0
try:
with open(log_path+already_displayed+'_'+id+'.csv', 'r') as f:
while (n < most_random_times):
i = random.randint(0,len(filelist)-1)
f.seek(0)
if (str(filelist[i])+'\n' in f):
n += 1
else:
filename = tmp+filelist[i]
with open(log_path+already_displayed+'_'+id+'.csv', 'a') as ff:
ff.write(filelist[i]+'\n')
return un_gz(filename)
except:
with open(log_path+already_displayed+'_'+id+'.csv', 'a') as f:
i = random.randint(0,len(filelist)-1)
f.write(filelist[i]+'\n')
filename = tmp+filelist[i]
return un_gz(filename)
return "done"
def getValidLabelNum(id):
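# Returns a tuple of (distinct accounts this user has labeled,
# that count divided by the number of accounts already displayed,
# number of accounts already displayed to the user).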
accounts = []
num = 0
try:
with open (log_path+already_displayed+'_'+id+'.csv') as f:
for line in f:
num += 1
except:
pass
try:
with open(log_path+already_labeled+'_'+id+'.csv', 'r') as f:
for line in f:
account = line.split(" ")[1]
if account not in accounts:
accounts += [account]
except:
return 0, 0, num
return len(accounts), len(accounts)/num, num
# unzip
def un_gz(file_name):
"""ungz zip file"""
f_name = file_name.replace(".gz", "")
g_file = gzip.GzipFile(file_name)
open(f_name, "wb").write(g_file.read())
g_file.close()
return f_name
def process(file, num):
data = []
done = 0
with open(file, encoding = "ISO-8859-1") as f:
for line in f:
try:
data.append(json.loads(line))
except:
continue
num -= 1
if num == 0:
break
os.remove(file)
return data
| null |
backend/main/__init__.py
|
__init__.py
|
py
| 4,571 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Blueprint",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "gzip.GzipFile",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 163,
"usage_type": "call"
}
] |
632687183
|
"""
Data models for "studies" studies include attributes about the data and are
heavier in terms of data load
"""
import json
import os
import sys
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import semantic_version
from .metadata import MetaData
from .expression import ExpressionData, SpikeInData
from .quality_control import MappingStatsData, MIN_READS
from .splicing import SplicingData, FRACTION_DIFF_THRESH
from ..compute.predict import PredictorConfigManager
from ..visualize.color import blue
from ..visualize.ipython_interact import Interactive
from ..external import data_package_url_to_dict, check_if_already_downloaded, \
make_study_datapackage, FLOTILLA_DOWNLOAD_DIR
SPECIES_DATA_PACKAGE_BASE_URL = 'http://sauron.ucsd.edu/flotilla_projects'
# import flotilla
# FLOTILLA_DIR = os.path.dirname(flotilla.__file__)
class StudyFactory(object):
_accepted_filetypes = 'tsv'
def __init__(self):
self.minimal_study_parameters = set()
self.new_study_params = set()
self.getters = []
self.default_sample_subset = None
self.default_feature_subset = None
def __setattr__(self, key, value):
"""Check if the attribute already exists and warns on overwrite.
"""
if hasattr(self, key):
warnings.warn('Over-writing attribute {}'.format(key))
super(StudyFactory, self).__setattr__(key, value)
@staticmethod
def _to_base_file_tuple(tup):
"""for making new packages, auto-loadable data!"""
assert len(tup) == 2
return "os.path.join(study_data_dir, %s)" % os.path.basename(tup[0]), \
tup[1]
def _add_package_data_resource(self, file_name, data_df,
toplevel_package_dir,
file_write_mode="tsv"):
writer = getattr(self, "_write_" + file_write_mode)
file_base = os.path.basename(file_name)
rsc_file = os.path.join(toplevel_package_dir, "study_data",
file_base + "." + file_write_mode)
writer(data_df, rsc_file)
return (rsc_file, file_write_mode)
def validate_params(self):
"""make sure that all necessary attributes are present"""
for param in self.minimal_study_parameters:
try:
x = getattr(self, param)
except AttributeError:  # getattr raises AttributeError when the attribute is missing
raise AssertionError("Missing minimal parameter %s" % param)
@staticmethod
def _load_pickle_df(file_name):
return pd.read_pickle(file_name)
@staticmethod
def _write_pickle_df(df, file_name):
df.to_pickle(file_name)
@staticmethod
def _load_gzip_pickle_df(file_name):
import cPickle
import gzip
with gzip.open(file_name, 'r') as f:
return cPickle.load(f)
@staticmethod
def _write_gzip_pickle_df(df, file_name):
import tempfile
tmpfile_h, tmpfile = tempfile.mkstemp()
df.to_pickle(tmpfile)
import subprocess
subprocess.call(['gzip', '-f', tmpfile])  # compress the pickled temp file -> tmpfile.gz
subprocess.call(['mv', '%s.gz' % tmpfile, file_name])  # move the gzipped pickle to its destination
@staticmethod
def _load_tsv(file_name, compression=None):
return pd.read_table(file_name, index_col=0, compression=compression)
@staticmethod
def _load_json(filename, compression=None):
"""
Parameters
----------
filename : str
Name of the json file to read
compression : str
Not used, only for compatibility with other load functions
Returns
-------
Raises
------
"""
return pd.read_json(filename)
@staticmethod
def _write_tsv(df, file_name):
df.to_csv(file_name, sep='\t')
@staticmethod
def _load_csv(file_name, compression=None):
return pd.read_csv(file_name, index_col=0, compression=compression)
@staticmethod
def _write_csv(df, file_name):
df.to_csv(file_name)
def _get_loading_method(self, file_name):
"""loading_methods for loading from file"""
return getattr(self, "_load_" + file_name)
def load(self, file_name, file_type='pickle_df'):
return self._get_loading_method(file_type)(file_name)
class Study(StudyFactory):
"""A biological study, with associated metadata, expression, and splicing
data.
"""
default_feature_set_ids = []
# Data types with enough data that we'd probably reduce them, and even
# then we might want to take subsets. E.g. most variant genes for
# expression. But we don't expect to do this for spikein or mapping_stats
# data
_subsetable_data_types = ['expression', 'splicing']
initializers = {'metadata_data': MetaData,
'expression_data': ExpressionData,
'splicing_data': SplicingData,
'mapping_stats_data': MappingStatsData,
'spikein_data': SpikeInData}
readers = {'tsv': StudyFactory._load_tsv,
'csv': StudyFactory._load_csv,
'json': StudyFactory._load_json,
'pickle_df': StudyFactory._load_pickle_df,
'gzip_pickle_df': StudyFactory._load_gzip_pickle_df}
_default_reducer_kwargs = {'whiten': False,
'show_point_labels': False,
'show_vectors': False}
_default_plot_kwargs = {'marker': 'o', 'color': blue}
def __init__(self, sample_metadata, version, expression_data=None,
splicing_data=None,
expression_feature_data=None,
expression_feature_rename_col='gene_name',
splicing_feature_data=None,
splicing_feature_rename_col='gene_name',
mapping_stats_data=None,
mapping_stats_number_mapped_col="Uniquely mapped reads "
"number",
mapping_stats_min_reads=MIN_READS,
spikein_data=None,
spikein_feature_data=None,
drop_outliers=True, species=None,
gene_ontology_data=None,
expression_log_base=None,
predictor_config_manager=None,
metadata_pooled_col=None,
metadata_phenotype_col='phenotype',
phenotype_order=None,
phenotype_to_color=None,
phenotype_to_marker=None,
license=None, title=None, sources=None):
"""Construct a biological study
This class only accepts data, no filenames. All data must already
have been read in and exist as Python objects.
Parameters
----------
#TODO: Maybe make these all kwargs?
sample_metadata : pandas.DataFrame
Only required parameter. Samples as the index, with features as
columns. If there is a column named "color", this will be used as
the color for that sample in DataFramePCA and other plots. If there is no
color but there is a column named "celltype", then colors for
each of the different celltypes will be auto-created.
expression_data : pandas.DataFrame
Samples x feature dataframe of gene expression measurements,
e.g. from an RNA-Seq or a microarray experiment. Assumed to be
log-normal (i.e. not log-transformed)
expression_feature_data : pandas.DataFrame
features x other_features dataframe describing other parameters
of the gene expression features, e.g. mapping Ensembl IDs to gene
symbols or gene biotypes.
expression_feature_rename_col : str
A column name in the expression_feature_data dataframe that you'd
like to rename the expression features to, in the plots. For
example, if your gene IDs are Ensembl IDs, but you want to plot
UCSC IDs, make sure the column you want, e.g. "ucsc_id" is in your
dataframe and specify that. Default "gene_name"
splicing_data : pandas.DataFrame
Samples x feature dataframe of percent spliced in scores, e.g. as
measured by the program MISO. Assumed that these values only fall
between 0 and 1.
splicing_feature_data : pandas.DataFrame
features x other_features dataframe describing other parameters
of the splicing features, e.g. mapping MISO IDs to Ensembl IDs or
gene symbols or transcript types
splicing_feature_rename_col : str
A column name in the splicing_feature_data dataframe that you'd
like to rename the splicing features to, in the plots. For
example, if your splicing IDs are MISO IDs, but you want to plot
Ensembl IDs, make sure the column you want, e.g. "ensembl_id" is
in your dataframe and specify that. Default "gene_name".
mapping_stats_data : pandas.DataFrame
Samples x feature dataframe of mapping stats measurements.
Currently, this
mapping_stats_number_mapped_col : str
A column name in the mapping_stats_data which specifies the
number of (uniquely or not) mapped reads. Default "Uniquely
mapped reads number"
spikein_data : pandas.DataFrame
samples x features DataFrame of spike-in expression values
spikein_feature_data : pandas.DataFrame
Features x other_features dataframe, e.g. of the molecular
concentration of particular spikein transcripts
drop_outliers : bool
Whether or not to drop samples indicated as outliers in the
sample_metadata from the other data, i.e. with a column
named 'outlier' in sample_metadata, then remove those
samples from expression_data for further analysis
species : str
Name of the species and genome version, e.g. 'hg19' or 'mm10'.
gene_ontology_data : pandas.DataFrame
Gene ids x ontology categories dataframe used for GO analysis.
metadata_pooled_col : str
Column in metadata_data which specifies as a boolean
whether or not this sample was pooled.
Note
----
This function explicitly specifies ALL the instance variables (except
those that are marked by the @property decorator), because,
as described [1], "If you write initialization functions separate from
__init__ then experienced developers will certainly see your code as a
kid's playground."
[1] http://stackoverflow.com/q/12513185/1628971
"""
super(Study, self).__init__()
sys.stderr.write("initializing study\n")
self.predictor_config_manager = predictor_config_manager \
if predictor_config_manager is not None \
else PredictorConfigManager()
# self.predictor_config_manager = None
self.species = species
self.gene_ontology_data = gene_ontology_data
self.license = license
self.title = title
self.sources = sources
self.version = version
self.metadata = MetaData(
sample_metadata, phenotype_order, phenotype_to_color,
phenotype_to_marker, pooled_col=metadata_pooled_col,
phenotype_col=metadata_phenotype_col,
predictor_config_manager=self.predictor_config_manager)
self.phenotype_col = self.metadata.phenotype_col
self.phenotype_order = self.metadata.phenotype_order
self.phenotype_to_color = self.metadata.phenotype_to_color
self.phenotype_to_marker = self.metadata.phenotype_to_marker
self.phenotype_color_ordered = self.metadata.phenotype_color_order
self.sample_id_to_phenotype = self.metadata.sample_id_to_phenotype
self.sample_id_to_color = self.metadata.sample_id_to_color
self.phenotype_transitions = self.metadata.phenotype_transitions
if 'outlier' in self.metadata.data and drop_outliers:
outliers = self.metadata.data.index[
self.metadata.data.outlier.astype(bool)]
else:
outliers = None
self.metadata.data['outlier'] = False
# Get pooled samples
if self.metadata.pooled_col is not None:
if self.metadata.pooled_col in self.metadata.data:
try:
pooled = self.metadata.data.index[
self.metadata.data[
self.metadata.pooled_col].astype(bool)]
except:
pooled = None
else:
pooled = None
if mapping_stats_data is not None:
self.mapping_stats = MappingStatsData(
mapping_stats_data,
mapping_stats_number_mapped_col,
predictor_config_manager=self.predictor_config_manager,
min_reads=mapping_stats_min_reads)
self.technical_outliers = self.mapping_stats.too_few_mapped
else:
self.technical_outliers = None
if expression_data is not None:
sys.stderr.write("loading expression data\n")
self.expression = ExpressionData(
expression_data,
expression_feature_data,
feature_rename_col=expression_feature_rename_col,
outliers=outliers,
log_base=expression_log_base, pooled=pooled,
predictor_config_manager=self.predictor_config_manager,
technical_outliers=self.technical_outliers)
# self.expression.networks = NetworkerViz(self.expression)
self.default_feature_set_ids.extend(self.expression.feature_subsets
.keys())
if splicing_data is not None:
sys.stderr.write("loading splicing data\n")
self.splicing = SplicingData(
splicing_data, splicing_feature_data,
feature_rename_col=splicing_feature_rename_col,
outliers=outliers, pooled=pooled,
predictor_config_manager=self.predictor_config_manager,
technical_outliers=self.technical_outliers)
# self.splicing.networks = NetworkerViz(self.splicing)
if spikein_data is not None:
self.spikein = SpikeInData(
spikein_data, spikein_feature_data,
technical_outliers=self.technical_outliers,
predictor_config_manager=self.predictor_config_manager)
sys.stderr.write("subclasses initialized\n")
self.validate_params()
sys.stderr.write("package validated\n")
@property
def default_sample_subsets(self):
default_sample_subsets = [col for col in self.metadata.data.columns
if self.metadata.data[col].dtype == bool]
default_sample_subsets.extend(['~{}'.format(col)
for col in self.metadata.data.columns
if
self.metadata.data[col].dtype == bool])
default_sample_subsets.insert(0, 'all_samples')
return default_sample_subsets
@property
def default_feature_subsets(self):
feature_subsets = {}
for name in self._subsetable_data_types:
try:
data_type = getattr(self, name)
except AttributeError:
continue
feature_subsets[name] = data_type.feature_subsets
return feature_subsets
@classmethod
def from_datapackage_url(
cls, datapackage_url,
load_species_data=True,
species_data_package_base_url=SPECIES_DATA_PACKAGE_BASE_URL):
"""Create a study from a url of a datapackage.json file
Parameters
----------
datapackage_url : str
HTTP url of a datapackage.json file, following the specification
described here: http://dataprotocols.org/data-packages/ and
requiring the following data resources: metadata,
expression, splicing
species_data_package_base_url : str
Base URL to fetch species-specific gene and splicing event
metadata from. Default 'http://sauron.ucsd.edu/flotilla_projects'
Returns
-------
study : Study
A "study" object containing the data described in the
datapackage_url file
Raises
------
AttributeError
If the datapackage.json file does not contain the required
resources of metadata, expression, and splicing.
"""
data_package = data_package_url_to_dict(datapackage_url)
return cls.from_datapackage(
data_package, load_species_data=load_species_data,
species_datapackage_base_url=species_data_package_base_url)
@classmethod
def from_datapackage_file(
cls, datapackage_filename,
load_species_data=True,
species_datapackage_base_url=SPECIES_DATA_PACKAGE_BASE_URL):
with open(datapackage_filename) as f:
datapackage = json.load(f)
datapackage_dir = os.path.dirname(datapackage_filename)
return cls.from_datapackage(
datapackage, datapackage_dir=datapackage_dir,
load_species_data=load_species_data,
species_datapackage_base_url=species_datapackage_base_url)
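# A minimal usage sketch (the path is illustrative, not from the original project):
#   study = Study.from_datapackage_file('my_project/datapackage.json',
#                                       load_species_data=False)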
@classmethod
def from_datapackage(
cls, datapackage, datapackage_dir='./',
load_species_data=True,
species_datapackage_base_url=SPECIES_DATA_PACKAGE_BASE_URL):
"""Create a study object from a datapackage dictionary
Parameters
----------
datapackage : dict
Returns
-------
study : flotilla.Study
Study object
"""
dfs = {}
log_base = None
# metadata_pooled_col = None
# metadata_phenotype_col = None
# phenotype_order = None
# phenotype_to_color = None
# phenotype_to_marker = None
metadata_kws = dict.fromkeys(['metadata_pooled_col', 'phenotype_order',
'phenotype_to_color',
'phenotype_to_marker'], None)
for resource in datapackage['resources']:
if 'url' in resource:
resource_url = resource['url']
filename = check_if_already_downloaded(resource_url)
else:
filename = resource['path']
# Test if the file exists, if not, then add the datapackage
# file
try:
with open(filename) as f:
pass
except IOError:
filename = os.path.join(datapackage_dir, filename)
name = resource['name']
reader = cls.readers[resource['format']]
compression = None if 'compression' not in resource else \
resource['compression']
dfs[name] = reader(filename, compression=compression)
if name == 'expression':
if 'log_transformed' in resource:
log_base = 2
if name == 'metadata':
if 'pooled_col' in resource:
metadata_kws['metadata_pooled_col'] = resource[
'pooled_col']
if 'phenotype_col' in resource:
metadata_kws['metadata_phenotype_col'] = resource[
'phenotype_col']
if 'phenotype_order' in resource:
metadata_kws['phenotype_order'] = resource[
'phenotype_order']
if 'phenotype_to_color' in resource:
metadata_kws['phenotype_to_color'] = resource[
'phenotype_to_color']
if 'phenotype_to_marker' in resource:
metadata_kws['phenotype_to_marker'] = resource[
'phenotype_to_marker']
species_dfs = {}
species = None if 'species' not in datapackage else datapackage[
'species']
if load_species_data:
try:
if 'species' in datapackage:
species_data_url = '{}/{}/datapackage.json'.format(
species_datapackage_base_url, species)
species_data_package = data_package_url_to_dict(
species_data_url)
# species_dfs = {}
for resource in species_data_package['resources']:
if 'url' in resource:
resource_url = resource['url']
filename = check_if_already_downloaded(resource_url)
else:
filename = resource['path']
# reader = getattr(cls, '_load_' + resource['format'])
reader = cls.readers[resource['format']]
compression = None if 'compression' not in resource else \
resource['compression']
name = resource['name']
species_dfs[name] = reader(filename,
compression=compression)
if 'feature_rename_col' in resource:
key = '{}_feature_rename_col'.format(
name.split('_feature_data')[0])
species_dfs[key] = resource['feature_rename_col']
except (IOError, ValueError) as e:
pass
try:
sample_metadata = dfs['metadata']
expression_data = None if 'expression' not in dfs else dfs[
'expression']
splicing_data = None if 'splicing' not in dfs else dfs['splicing']
except KeyError:
raise AttributeError('The datapackage.json file is required to '
'have the "metadata" resource')
try:
mapping_stats_data = dfs['mapping_stats']
except KeyError:
mapping_stats_data = None
try:
spikein_data = dfs['spikein']
except KeyError:
spikein_data = None
nones = [k for k, v in metadata_kws.iteritems() if v is None]
for key in nones:
metadata_kws.pop(key)
# import pdb; pdb.set_trace()
kwargs = species_dfs
kwargs.update(metadata_kws)
license = None if 'license' not in datapackage else datapackage[
'license']
title = None if 'title' not in datapackage else datapackage[
'title']
sources = None if 'sources' not in datapackage else datapackage[
'sources']
version = None if 'datapackage_version' not in datapackage else \
datapackage['datapackage_version']
if not semantic_version.validate(version):
raise ValueError('{} is not a valid version string. Please use '
'semantic versioning, with major.minor.patch, '
'e.g. 0.1.2 is a valid version string'.format(
version))
study = Study(
sample_metadata=sample_metadata,
expression_data=expression_data,
splicing_data=splicing_data,
mapping_stats_data=mapping_stats_data,
spikein_data=spikein_data,
# expression_feature_rename_col='gene_name',
# splicing_feature_rename_col='gene_name',
expression_log_base=log_base,
species=species,
license=license,
title=title,
sources=sources,
version=version,
**kwargs)
return study
def __add__(self, other):
"""Sanely concatenate one or more Study objects
"""
raise NotImplementedError
self.metadata = MetaData(
pd.concat([self.metadata.data,
other.metadata.data]))
self.expression.data = ExpressionData(
pd.concat([self.expression.data,
other.expression.data]))
# def _set_plot_colors(self):
# """If there is a column 'color' in the sample metadata, specify this
# as the plotting color
# """
# try:
# self._default_reducer_kwargs.update(
# {'colors_dict': self.metadata_data.color})
# self._default_plot_kwargs.update(
# {'color': self.metadata_data.color.tolist()})
# except AttributeError:
# sys.stderr.write("There is no column named 'color' in the "
# "metadata, defaulting to blue for all samples\n")
# self._default_reducer_kwargs.update(
# {'colors_dict': defaultdict(lambda: blue)})
# def _set_plot_markers(self):
# """If there is a column 'marker' in the sample metadata, specify this
# as the plotting marker (aka the plotting shape). Only valid matplotlib
# symbols are allowed. See http://matplotlib.org/api/markers_api.html
# for a more complete description.
# """
# try:
# self._default_reducer_kwargs.update(
# {'markers_dict': self.metadata_data.marker})
# self._default_plot_kwargs.update(
# {'marker': self.metadata_data.marker.tolist()})
# except AttributeError:
# sys.stderr.write("There is no column named 'marker' in the sample "
# "metadata, defaulting to a circle for all "
# "samples\n")
# self._default_reducer_kwargs.update(
# {'markers_dict': defaultdict(lambda: 'o')})
def detect_outliers(self):
"""Detects outlier cells from expression, mapping, and splicing
study_data and labels the outliers as such for future analysis.
Parameters
----------
self
Returns
-------
Raises
------
"""
# TODO.md: Boyko/Patrick please implement
raise NotImplementedError
def jsd(self):
"""Performs Jensen-Shannon Divergence on both splicing and expression
study_data
Jensen-Shannon divergence is a method of quantifying the amount of
change in distribution of one measurement (e.g. a splicing event or a
gene expression) from one celltype to another.
"""
raise NotImplementedError
# TODO: Check if JSD has not already been calculated (memoize)
self.expression.jsd()
self.splicing.jsd()
def normalize_to_spikein(self):
raise NotImplementedError
def compute_expression_splicing_covariance(self):
raise NotImplementedError
@staticmethod
def maybe_make_directory(filename):
# Make the directory if it's not already there
try:
directory = os.path.abspath(os.path.dirname(filename))
os.makedirs(os.path.abspath(directory))
except OSError:
pass
def feature_subset_to_feature_ids(self, data_type, feature_subset=None,
rename=False):
"""Given a name of a feature subset, get the associated feature ids
Parameters
----------
data_type : str
A string describing the data type, e.g. "expression"
feature_subset : str
A string describing the subset of data type (must be already
calculated)
Returns
-------
feature_ids : list of strings
List of feature ids from the specified datatype
"""
if 'expression'.startswith(data_type):
return self.expression.feature_subset_to_feature_ids(
feature_subset, rename)
elif 'splicing'.startswith(data_type):
return self.splicing.feature_subset_to_feature_ids(
feature_subset, rename)
def sample_subset_to_sample_ids(self, phenotype_subset=None):
"""Convert a string naming a subset of phenotypes in the data into
sample ids
Parameters
----------
phenotype_subset : str
A valid string describing a boolean phenotype described in the
metadata data
Returns
-------
sample_ids : list of strings
List of sample ids in the data
"""
# IF this is a list of IDs
try:
#TODO: check this, seems like a strange usage: 'all_samples'.startswith(phenotype_subset)
if phenotype_subset is None or 'all_samples'.startswith(
phenotype_subset):
sample_ind = np.ones(self.metadata.data.shape[0],
dtype=bool)
elif phenotype_subset.startswith("~"):
sample_ind = ~pd.Series(
self.metadata.data[phenotype_subset.lstrip("~")],
dtype='bool')
else:
sample_ind = pd.Series(
self.metadata.data[phenotype_subset], dtype='bool')
sample_ids = self.metadata.data.index[sample_ind]
return sample_ids
except AttributeError:
return phenotype_subset
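# Illustrative calls (assume a boolean metadata column named 'outlier'):
#   study.sample_subset_to_sample_ids('outlier')      # ids where outlier is True
#   study.sample_subset_to_sample_ids('~outlier')     # ids where outlier is False
#   study.sample_subset_to_sample_ids('all_samples')  # every sample id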
def plot_pca(self, data_type='expression', x_pc=1, y_pc=2,
sample_subset=None, feature_subset=None,
title='', featurewise=False,
show_point_labels=False, reduce_kwargs=None,
**kwargs):
"""Performs DataFramePCA on both expression and splicing study_data
Parameters
----------
data_type : str
One of the names of the data types, e.g. "expression" or
"splicing"
x_pc : int
Which principal component to plot on the x-axis
y_pc : int
Which principal component to plot on the y-axis
sample_subset : str or None
Which subset of the samples to use, based on some phenotype
column in the experiment design data. If None, all samples are
used.
feature_subset : str or None
Which subset of the features to used, based on some feature type
in the expression data (e.g. "variant"). If None, all features
are used.
title : str
The title of the plot
show_point_labels : bool
Whether or not to show the labels of the points. If this is
samplewise (default), then this labels the samples. If this is
featurewise, then this labels the features.
Raises
------
"""
sample_ids = self.sample_subset_to_sample_ids(sample_subset)
feature_ids = self.feature_subset_to_feature_ids(data_type,
feature_subset,
rename=False)
if not featurewise:
label_to_color = self.phenotype_to_color
label_to_marker = self.phenotype_to_marker
groupby = self.sample_id_to_phenotype
order = self.phenotype_order
color = self.phenotype_color_ordered
else:
label_to_color = None
label_to_marker = None
groupby = None
order = None
color = None
if "expression".startswith(data_type):
reducer = self.expression.plot_pca(
x_pc=x_pc, y_pc=y_pc, sample_ids=sample_ids,
feature_ids=feature_ids,
label_to_color=label_to_color,
label_to_marker=label_to_marker, groupby=groupby,
order=order, color=color,
featurewise=featurewise, show_point_labels=show_point_labels,
title=title, reduce_kwargs=reduce_kwargs, **kwargs)
elif "splicing".startswith(data_type):
reducer = self.splicing.plot_pca(
x_pc=x_pc, y_pc=y_pc, sample_ids=sample_ids,
feature_ids=feature_ids,
label_to_color=label_to_color,
label_to_marker=label_to_marker, groupby=groupby,
order=order, color=color,
featurewise=featurewise, show_point_labels=show_point_labels,
title=title, reduce_kwargs=reduce_kwargs, **kwargs)
else:
raise ValueError('The data type {} does not exist in this study'
.format(data_type))
return reducer
def plot_graph(self, data_type='expression', sample_subset=None,
feature_subset=None, featurewise=False,
**kwargs):
"""Plot the graph (network) of these data
Parameters
----------
data_type : str
One of the names of the data types, e.g. "expression" or "splicing"
sample_subset : str or None
Which subset of the samples to use, based on some phenotype
column in the experiment design data. If None, all samples are
used.
feature_subset : str or None
Which subset of the features to used, based on some feature type
in the expression data (e.g. "variant"). If None, all features
are used.
"""
sample_ids = self.sample_subset_to_sample_ids(sample_subset)
feature_ids = self.feature_subset_to_feature_ids(data_type,
feature_subset,
rename=False)
if not featurewise:
label_to_color = self.phenotype_to_color
label_to_marker = self.phenotype_to_marker
groupby = self.sample_id_to_phenotype
else:
label_to_color = None
label_to_marker = None
groupby = None
if data_type == "expression":
return self.expression.networks.draw_graph(
sample_ids=sample_ids, feature_ids=feature_ids,
sample_id_to_color=self.sample_id_to_color,
label_to_color=label_to_color,
label_to_marker=label_to_marker, groupby=groupby,
featurewise=featurewise,
**kwargs)
elif data_type == "splicing":
return self.splicing.networks.draw_graph(
sample_ids=sample_ids, feature_ids=feature_ids,
sample_id_to_color=self.sample_id_to_color,
label_to_color=label_to_color,
label_to_marker=label_to_marker, groupby=groupby,
featurewise=featurewise,
**kwargs)
def plot_study_sample_legend(self):
markers = self.metadata.data.color.groupby(
self.metadata.data.marker
+ "." + self.metadata.data.celltype).last()
f, ax = plt.subplots(1, 1, figsize=(3, len(markers)))
for i, point_type in enumerate(markers.iteritems(), ):
mrk, celltype = point_type[0].split('.')
ax.scatter(0, 0, marker=mrk, c=point_type[1],
edgecolor='none', label=celltype,
s=160)
ax.set_xlim(1, 2)
ax.set_ylim(1, 2)
ax.axis('off')
legend = ax.legend(title='cell type', fontsize=20, )
return legend
def plot_classifier(self, trait, sample_subset=None,
feature_subset='all_genes',
data_type='expression', title='',
show_point_labels=False,
**kwargs):
"""Plot a predictor for the specified data type and trait(s)
Parameters
----------
data_type : str
One of the names of the data types, e.g. "expression" or "splicing"
trait : str
Column name in the metadata data that you would like
to classify on
Returns
-------
"""
trait_data = self.metadata.data[trait]
sample_ids = self.sample_subset_to_sample_ids(sample_subset)
feature_ids = self.feature_subset_to_feature_ids(data_type,
feature_subset,
rename=False)
feature_subset = 'none' if feature_subset is None else feature_subset
sample_subset = 'none' if sample_subset is None else sample_subset
data_name = '_'.join([sample_subset, feature_subset])
label_to_color = self.phenotype_to_color
label_to_marker = self.phenotype_to_marker
groupby = self.sample_id_to_phenotype
order = self.phenotype_order
color = self.phenotype_color_ordered
if data_type == "expression":
self.expression.plot_classifier(
data_name=data_name, trait=trait_data,
sample_ids=sample_ids, feature_ids=feature_ids,
label_to_color=label_to_color,
label_to_marker=label_to_marker, groupby=groupby,
show_point_labels=show_point_labels, title=title,
order=order, color=color,
**kwargs)
elif data_type == "splicing":
self.splicing.plot_classifier(
data_name=data_name, trait=trait_data,
sample_ids=sample_ids, feature_ids=feature_ids,
label_to_color=label_to_color,
label_to_marker=label_to_marker, groupby=groupby,
show_point_labels=show_point_labels, title=title,
order=order, color=color,
**kwargs)
def plot_regressor(self, data_type='expression', **kwargs):
"""
"""
raise NotImplementedError
if data_type == "expression":
self.expression.plot_regressor(**kwargs)
elif data_type == "splicing":
self.splicing.plot_regressor(**kwargs)
def modalities(self, sample_subset=None, feature_subset=None):
"""Get modality assignments of
"""
sample_ids = self.sample_subset_to_sample_ids(sample_subset)
feature_ids = self.feature_subset_to_feature_ids('splicing',
feature_subset,
rename=False)
return self.splicing.modalities(sample_ids, feature_ids)
def plot_modalities(self, sample_subset=None, feature_subset=None,
normed=True):
# try:
sample_ids = self.sample_subset_to_sample_ids(sample_subset)
feature_ids = self.feature_subset_to_feature_ids('splicing',
feature_subset,
rename=False)
grouped = self.sample_id_to_phenotype.groupby(
self.sample_id_to_phenotype)
# Account for bar plot and plot of the reduced space of ALL samples
n = grouped.ngroups + 2
groups = ['all']
fig, axes = plt.subplots(ncols=n, figsize=(n * 4, 4))
bar_ax = axes[0]
all_ax = axes[1]
self.splicing.plot_modalities_reduced(sample_ids, feature_ids,
all_ax, title='all samples')
self.splicing.plot_modalities_bar(sample_ids, feature_ids,
bar_ax, i=0, normed=normed,
legend=False)
axes = axes[2:]
for i, ((celltype, series), ax) in enumerate(zip(grouped, axes)):
groups.append(celltype)
sys.stdout.write('\n---- {} ----\n'.format(celltype))
samples = series.index.intersection(sample_ids)
# legend = i == 0
self.splicing.plot_modalities_bar(samples, feature_ids,
bar_ax, i + 1, normed=normed,
legend=False)
self.splicing.plot_modalities_reduced(samples, feature_ids,
ax, title=celltype)
bar_ax.set_xticks(np.arange(len(groups)) + 0.4)
bar_ax.set_xticklabels(groups)
# except AttributeError:
# pass
def celltype_sizes(self, data_type='splicing'):
if data_type == 'expression':
self.expression.data.groupby(self.sample_id_to_phenotype,
axis=0).size()
if data_type == 'splicing':
self.splicing.data.groupby(self.sample_id_to_phenotype,
axis=0).size()
@property
def celltype_event_counts(self):
"""Number of cells that detected this event in that celltype
"""
return self.splicing.data.groupby(
self.sample_id_to_phenotype, axis=0).apply(
lambda x: x.groupby(level=0, axis=0).transform(
lambda x: x.count()).sum()).replace(0, np.nan)
def unique_celltype_event_counts(self, n=1):
celltype_event_counts = self.celltype_event_counts
return celltype_event_counts[celltype_event_counts <= n]
def percent_unique_celltype_events(self, n=1):
n_unique = self.unique_celltype_event_counts(n).sum(axis=1)
n_total = self.celltype_event_counts.sum(axis=1).astype(float)
return n_unique / n_total * 100
@property
def celltype_modalities(self):
"""Return modality assignments of each celltype
"""
return self.splicing.data.groupby(
self.sample_id_to_phenotype, axis=0).apply(
lambda x: self.splicing.modalities(x.index))
def plot_modalities_lavalamps(self, sample_subset=None, bootstrapped=False,
bootstrapped_kws=None):
grouped = self.splicing.data.groupby(self.sample_id_to_color, axis=0)
celltype_groups = self.splicing.data.groupby(
self.sample_id_to_phenotype, axis=0)
if sample_subset is not None:
# Only plotting one sample_subset, use the modality assignments
# from just the samples from this sample_subset
celltype_samples = celltype_groups.groups[sample_subset]
celltype_samples = set(celltype_groups.groups[sample_subset])
use_these_modalities = True
else:
# Plotting all the celltypes, use the modality assignments from
# all celltypes together
celltype_samples = self.splicing.data.index
use_these_modalities = False
for i, (color, sample_ids) in enumerate(grouped.groups.iteritems()):
x_offset = 1. / (i + 1)
sample_ids = celltype_samples.intersection(sample_ids)
if len(sample_ids) > 0:
self.splicing.plot_modalities_lavalamps(
sample_ids=sample_ids,
color=color,
x_offset=x_offset,
use_these_modalities=use_these_modalities,
bootstrapped=bootstrapped,
bootstrapped_kws=bootstrapped_kws)
def plot_event(self, feature_id, sample_subset=None):
"""Plot the violinplot and DataFrameNMF transitions of a splicing event
"""
sample_ids = self.sample_subset_to_sample_ids(sample_subset)
self.splicing.plot_event(feature_id, sample_ids,
phenotype_groupby=self.sample_id_to_phenotype,
phenotype_order=self.phenotype_order,
color=self.phenotype_color_ordered,
phenotype_to_color=self.phenotype_to_color,
phenotype_to_marker=self.phenotype_to_marker)
def plot_gene(self, feature_id, sample_subset=None):
sample_ids = self.sample_subset_to_sample_ids(sample_subset)
self.expression.plot_feature(feature_id, sample_ids,
phenotype_groupby=self.sample_id_to_phenotype,
phenotype_order=self.phenotype_order,
color=self.phenotype_color_ordered,
phenotype_to_color=self.phenotype_to_color,
phenotype_to_marker=self.phenotype_to_marker)
def plot_lavalamp_pooled_inconsistent(
self, sample_subset=None, feature_ids=None,
fraction_diff_thresh=FRACTION_DIFF_THRESH):
# grouped_ids = self.splicing.data.groupby(self.sample_id_to_color,
# axis=0)
celltype_groups = self.metadata.data.groupby(
self.sample_id_to_phenotype, axis=0)
if sample_subset is not None:
# Only plotting one sample_subset
celltype_samples = set(celltype_groups.groups[sample_subset])
else:
# Plotting all the celltypes
celltype_samples = self.sample_subset_to_sample_ids(sample_subset)
celltype_and_sample_ids = celltype_groups.groups.iteritems()
for i, (phenotype, sample_ids) in enumerate(
celltype_and_sample_ids):
# import pdb; pdb.set_trace()
# Assumes all samples of a sample_subset have the same color...
# probably wrong
color = self.phenotype_to_color[phenotype]
sample_ids = celltype_samples.intersection(sample_ids)
if len(sample_ids) == 0:
continue
self.splicing.plot_lavalamp_pooled_inconsistent(
sample_ids, feature_ids, fraction_diff_thresh,
color=color)
def percent_pooled_inconsistent(self,
sample_subset=None, feature_ids=None,
fraction_diff_thresh=FRACTION_DIFF_THRESH):
celltype_groups = self.metadata.data.groupby(
self.sample_id_to_phenotype, axis=0)
if sample_subset is not None:
# Only plotting one sample_subset
celltype_samples = set(celltype_groups.groups[sample_subset])
else:
# Plotting all the celltypes
celltype_samples = self.metadata.data.index
celltype_and_sample_ids = celltype_groups.groups.iteritems()
for i, (sample_subset, sample_ids) in enumerate(
celltype_and_sample_ids):
# import pdb; pdb.set_trace()
# Assumes all samples of a sample_subset have the same color...
# probably wrong
color = self.sample_id_to_color[sample_ids[0]]
sample_ids = celltype_samples.intersection(sample_ids)
if len(sample_ids) == 0:
continue
self.splicing.percent_pooled_inconsistent(sample_ids, feature_ids,
fraction_diff_thresh)
# def plot_clusteredheatmap(self, sample_subset=None,
# feature_subset='variant',
# data_type='expression', metric='euclidean',
# linkage_method='median', figsize=None):
# if data_type == 'expression':
# data = self.expression.data
# elif data_type == 'splicing':
# data = self.splicing.data
# celltype_groups = data.groupby(
# self.sample_id_to_phenotype, axis=0)
#
# if sample_subset is not None:
# # Only plotting one sample_subset
# try:
# sample_ids = set(celltype_groups.groups[sample_subset])
# except KeyError:
# sample_ids = self.sample_subset_to_sample_ids(sample_subset)
# else:
# # Plotting all the celltypes
# sample_ids = data.index
#
# sample_colors = [self.sample_id_to_color[x] for x in sample_ids]
# feature_ids = self.feature_subset_to_feature_ids(data_type,
# feature_subset,
# rename=False)
#
# if data_type == "expression":
# return self.expression.plot_clusteredheatmap(
# sample_ids, feature_ids, linkage_method=linkage_method,
# metric=metric, sample_colors=sample_colors, figsize=figsize)
# elif data_type == "splicing":
# return self.splicing.plot_clusteredheatmap(
# sample_ids, feature_ids, linkage_method=linkage_method,
# metric=metric, sample_colors=sample_colors, figsize=figsize)
def plot_big_nmf_space_transitions(self, data_type='expression'):
if data_type == 'expression':
self.expression.plot_big_nmf_space_transitions(
self.sample_id_to_phenotype, self.phenotype_transitions,
self.phenotype_order, self.phenotype_color_ordered,
self.phenotype_to_color, self.phenotype_to_marker)
if data_type == 'splicing':
self.splicing.plot_big_nmf_space_transitions(
self.sample_id_to_phenotype, self.phenotype_transitions,
self.phenotype_order, self.phenotype_color_ordered,
self.phenotype_to_color, self.phenotype_to_marker)
def save(self, name, flotilla_dir=FLOTILLA_DOWNLOAD_DIR):
metadata = self.metadata.data
metadata_kws = {'pooled_col': self.metadata.pooled_col,
'phenotype_col': self.metadata.phenotype_col,
'phenotype_order': self.metadata.phenotype_order,
'phenotype_to_color':
self.metadata.phenotype_to_color,
'phenotype_to_marker':
self.metadata.phenotype_to_marker}
try:
expression = self.expression.data
expression_kws = {'feature_rename_col':
self.expression.feature_rename_col,
'log_base': self.expression.log_base}
except AttributeError:
expression = None
expression_kws = None
try:
splicing = self.splicing.data
splicing_kws = {'feature_rename_col':
self.splicing.feature_rename_col}
except AttributeError:
splicing = None
splicing_kws = None
try:
spikein = self.spikein.data
except AttributeError:
spikein = None
try:
mapping_stats = self.mapping_stats.data
mapping_stats_kws = {'number_mapped_col':
self.mapping_stats.number_mapped_col}
except AttributeError:
mapping_stats = None
mapping_stats_kws = None
# Increase the version number
version = semantic_version.Version(self.version)
version.patch = version.patch + 1
version = str(version)
return make_study_datapackage(name, metadata, expression, splicing,
spikein, mapping_stats,
metadata_kws=metadata_kws,
expression_kws=expression_kws,
splicing_kws=splicing_kws,
mapping_stats_kws=mapping_stats_kws,
species=self.species,
license=self.license,
title=self.title,
sources=self.sources,
version=version,
flotilla_dir=flotilla_dir)
# Add interactive visualizations
Study.interactive_classifier = Interactive.interactive_classifier
Study.interactive_graph = Interactive.interactive_graph
Study.interactive_pca = Interactive.interactive_pca
# Study.interactive_localZ = Interactive.interactive_localZ
Study.interactive_lavalamp_pooled_inconsistent = \
Interactive.interactive_lavalamp_pooled_inconsistent
# Study.interactive_clusteredheatmap = Interactive.interactive_clusteredheatmap
| null |
flotilla/data_model/study.py
|
study.py
|
py
| 52,788 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "warnings.warn",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_pickle",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "gzip.open",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "cPickle.load",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "tempfile.mkstemp",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "pandas.read_table",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "pandas.read_json",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "metadata.MetaData",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "expression.ExpressionData",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "splicing.SplicingData",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "quality_control.MappingStatsData",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "expression.SpikeInData",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "{'cPickle': 'cPickle', 'gzip': 'gzip', 'tempfile': 'tempfile', 'subprocess': 'subprocess'}._load_tsv",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "{'cPickle': 'cPickle', 'gzip': 'gzip', 'tempfile': 'tempfile', 'subprocess': 'subprocess'}._load_csv",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "{'cPickle': 'cPickle', 'gzip': 'gzip', 'tempfile': 'tempfile', 'subprocess': 'subprocess'}._load_json",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "{'cPickle': 'cPickle', 'gzip': 'gzip', 'tempfile': 'tempfile', 'subprocess': 'subprocess'}._load_pickle_df",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "{'cPickle': 'cPickle', 'gzip': 'gzip', 'tempfile': 'tempfile', 'subprocess': 'subprocess'}._load_gzip_pickle_df",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "visualize.color.blue",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "quality_control.MIN_READS",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "sys.stderr.write",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 276,
"usage_type": "attribute"
},
{
"api_name": "compute.predict.PredictorConfigManager",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "metadata.MetaData",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "quality_control.MappingStatsData",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 335,
"usage_type": "attribute"
},
{
"api_name": "expression.ExpressionData",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 348,
"usage_type": "attribute"
},
{
"api_name": "splicing.SplicingData",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "expression.SpikeInData",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 362,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 364,
"usage_type": "attribute"
},
{
"api_name": "external.data_package_url_to_dict",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 430,
"usage_type": "attribute"
},
{
"api_name": "external.check_if_already_downloaded",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 477,
"usage_type": "attribute"
},
{
"api_name": "external.data_package_url_to_dict",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "external.check_if_already_downloaded",
"line_number": 522,
"usage_type": "call"
},
{
"api_name": "semantic_version.validate",
"line_number": 575,
"usage_type": "call"
},
{
"api_name": "metadata.MetaData",
"line_number": 602,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 603,
"usage_type": "call"
},
{
"api_name": "expression.ExpressionData",
"line_number": 605,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 606,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 684,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 684,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 684,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 685,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 685,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 685,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 736,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 739,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 744,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 881,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 881,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 982,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 982,
"usage_type": "name"
},
{
"api_name": "sys.stdout.write",
"line_number": 994,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 994,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 1004,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 1024,
"usage_type": "attribute"
},
{
"api_name": "splicing.FRACTION_DIFF_THRESH",
"line_number": 1095,
"usage_type": "name"
},
{
"api_name": "splicing.FRACTION_DIFF_THRESH",
"line_number": 1125,
"usage_type": "name"
},
{
"api_name": "external.FLOTILLA_DOWNLOAD_DIR",
"line_number": 1199,
"usage_type": "name"
},
{
"api_name": "semantic_version.Version",
"line_number": 1244,
"usage_type": "call"
},
{
"api_name": "external.make_study_datapackage",
"line_number": 1248,
"usage_type": "call"
},
{
"api_name": "visualize.ipython_interact.Interactive.interactive_classifier",
"line_number": 1263,
"usage_type": "attribute"
},
{
"api_name": "visualize.ipython_interact.Interactive",
"line_number": 1263,
"usage_type": "name"
},
{
"api_name": "visualize.ipython_interact.Interactive.interactive_graph",
"line_number": 1264,
"usage_type": "attribute"
},
{
"api_name": "visualize.ipython_interact.Interactive",
"line_number": 1264,
"usage_type": "name"
},
{
"api_name": "visualize.ipython_interact.Interactive.interactive_pca",
"line_number": 1265,
"usage_type": "attribute"
},
{
"api_name": "visualize.ipython_interact.Interactive",
"line_number": 1265,
"usage_type": "name"
},
{
"api_name": "visualize.ipython_interact.Interactive.interactive_lavalamp_pooled_inconsistent",
"line_number": 1268,
"usage_type": "attribute"
},
{
"api_name": "visualize.ipython_interact.Interactive",
"line_number": 1268,
"usage_type": "name"
}
] |
236366452
|
"""The base class for deriving types that require schema support.
.. image:: images/metadata_types.jpg
.. contents::
======
Usage
======
The **meta_type** module contains three classes that are used in the
definition and instantiation of object instances. The root class is
:class:`CoreType`, which is derived from the Python :class:`dict`. The
primary feature of the :class:`CoreType` is to provide the means to access
properties of an object as a *dict* key or as an object attribute.
Derived from :class:`CoreType` is :class:`MetaType`, which provides the
interface for retrieving a schema from an object schema definition via the
:meth:`MetaType.get_schema()`.
The :class:`PropertySchema` is utilized to define a schema for a single
property. This includes setting data type, required, and other such common
schema definition conventions. See :ref:`property-schema-settings-table` for
details on the :class:`PropertySchema` settings available.
It would be a rare case that requires the use of :class:`CoreType` or
:class:`MetaType`. For the majority of cases, the use of
:class:`PropertySchema` is sufficient. With that in mind, the remainder of
this section will focus on the use of :class:`PropertySchema`.
Creating Property Schema
-------------------------
For general purposes, consider utilizing :class:`ontic.schema_type.SchemaType`,
for defining complete models. However, if you need validators for individual
properties, then direct use of :class:`PropertySchema` is a solution.
There are a number of ways to create a :class:`PropertySchema`. Take a look
at :class:`PropertySchema` class documentation for a complete exposition on
the means of instantiating an instance.
The most straightforward way to create an instance of a
:class:`PropertySchema`:
>>> prop_schema = PropertySchema(type='str', required=True, min=3)
>>> prop_schema
{'regex': None, 'enum': None, 'min': 3, 'default': None, 'max': None, \
'required': True, 'member_type': None, 'member_min': None, \
'type': <type 'str'>, 'member_max': None}
Demonstrated above is the creation of a property schema of type string. In
addition, the property schema marks the property as required and enforces a
minimum length of 3.
Along with the schema settings explicitly set in the constructor, there are a
number of other property schema settings that may be utilized. These
additional settings can be viewed in the output of the *prop_schema* object.
For the details on the property schema settings, see
:ref:`property-schema-settings-table`.
A :class:`PropertySchema` can be created or modified dynamically. If done so,
then the final schema instance should be validated with the
:meth:`validate_property_schema` method.
Utilizing Property Schema
--------------------------
Validation of a value utilizing the *prop_schema* created above is done with the
:meth:`validate_value` method.
>>> prop_schema = PropertySchema(type='str', required=True)
>>> some_value = 'The cat is on the roof.'
>>> validate_value(
... name='some_value', property_schema=prop_schema, value=some_value)
[]
:meth:`validate_value` returns an empty list if there are no errors.
The *name* parameter of the :meth:`validate_value`, is used to construct
friendly error messages. For example:
>>> validate_value('some_prop', prop_schema, None)
['The value for "some_prop" is required.']
The following example demonstrates a :class:`PropertySchema` being
instantiated with a dictionary. Subsequently, a bad value is passed,
producing multiple validation errors.
>>> other_schema = PropertySchema({
... 'type': 'str',
... 'max': 3,
... 'enum': {'dog', 'rat', 'cat'}
... })
>>> validate_value('other_prop', other_schema, 'frog')
['The value "frog" for "other_prop" not in enumeration [\\'rat\\', \\'dog\\', \
\\'cat\\'].', 'The value of "frog" for "other_prop" fails max of 3.']
.. _property-schema-settings-table:
Available Property Schema Settings
-----------------------------------
The following table gives a listing of the property schema settings that can
be used to define properties. Details on the schema settings are provided
after the table.
.. table:: Property Schema Settings
============ ========= ======== ======== =================================
Name Type Default Required Enumeration
============ ========= ======== ======== =================================
type str None False basestring, bool, complex, date,
type datetime, dict, float, int, list,
long, None, set, str, time,
unicode
default None None False
required bool False False
enum set None False
min complex None False
date
datetime
float
int
long
time
max complex None False
date
datetime
float
int
long
time
regex str None False
member_type str None False basestring, bool, complex, date,
type datetime, dict, float, int, list,
long, None, set, str, time,
unicode
member_min complex None False
date
datetime
float
int
long
time
member_max complex None False
date
datetime
float
int
long
time
============ ========= ======== ======== =================================
*type*
    The *type* setting restricts a property to a known type. If no type is
defined, then any value type may be assigned to the property.
The type definition can be by type or by string name. Both ``{type=int}``
and ``{type='int'}`` are valid examples of type declaration.
*default*
    If no default is provided, then the default value will be ``None``. If a
default value is supplied, it will only be applied under two conditions.
A default value is applied during instantiation of an object of type
:class:`PropertySchema`, :class:`~ontic.schema_type.SchemaType`,
or :class:`~ontic.ontic_type.OnticType`. The other case is when an
    instance of one of the given types is perfected via the methods
:func:`perfect_property_schema`, :func:`~ontic.schema_type.perfect_schema`,
or :func:`~ontic.ontic_type.perfect_object`.
The default is not applied during validation.
For the collection types (dict, list, and set), the default value is deep
copied. This is done to ensure that there is no sharing of collection
instances or values.
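    For example, when a :class:`PropertySchema` is instantiated, any settings
    that are omitted are filled with their schema defaults (an illustrative
    sketch; the schema name is arbitrary):
    >>> number_schema = PropertySchema(type='int')
    >>> number_schema.required
    False
    >>> number_schema.default is None
    True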
*required*
A *PropertySchema* with a required setting of *True*, will fail
validation if the property value is *None*.
*enum*
An *enum* setting is a set of values that the property value must adhere
to. If the *type* setting is provided, then the choices provided by
*enum* must be of that type. If no *type* is provided, then the choices
in the *enum* set may be of any type, even mixed type.
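    An illustrative sketch (the names and values are arbitrary); because the
    error message embeds the enumeration in arbitrary set order, only the
    number of errors is shown for the failing case:
    >>> color_schema = PropertySchema(enum={'red', 'green', 'blue'})
    >>> validate_value('color', color_schema, 'red')
    []
    >>> len(validate_value('color', color_schema, 'purple'))
    1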
*min*
The *min* setting has differing behavior, based on the *type* setting. If
no *type* setting is provided, then *min* test will not occur. For the
boundable types (strings and collections) the *min* setting will test that
the value length is not less than the minimum. For the comparable types
(numeric and chronological) the *min* setting will test that the
value is not less than the minimum.
*max*
    The *max* setting has differing behavior, based on the *type* setting. If
no *type* setting is provided, the *max* test will not occur. For the
boundable types (strings and collections) the *max* setting will test that
the value length is not more than the maximum. For the comparable types
(numeric and chronological) the *max* setting will test that the
value is not more than the maximum.
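    For example (an illustrative sketch with arbitrary names), a comparable
    type such as ``int`` is tested against the value itself:
    >>> count_schema = PropertySchema(type='int', min=1, max=10)
    >>> validate_value('count', count_schema, 5)
    []
    >>> validate_value('count', count_schema, 0)
    ['The value of "0" for "count" fails min of 1.']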
*regex*
The *regex* setting is only tested if the *type* or *member_type* setting
is 'str' and the *regex* setting is not None. When active, the *regex*
setting will be used to test the given string value. If the property
value is 'None', then no regex testing will be done.
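    A short illustrative sketch (the schema and values are arbitrary):
    >>> word_schema = PropertySchema(type='str', regex='^[a-z]+$')
    >>> validate_value('word', word_schema, 'lowercase')
    []
    >>> validate_value('word', word_schema, 'Not Lowercase')
    ['Value "Not Lowercase" for word does not meet regex: ^[a-z]+$']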
*member_type*
The *member_type* setting is used to restrict the value type for property
    *type* 'list' or 'set'. It does so by ensuring that each member of the
collection is of the type designated by *member_type*.
The type definition can be by type or by string name. Both
``{member_type=int}`` and ``{member_type='int'}`` are valid examples of
type declaration.
*member_min*
The *member_min* setting has differing behavior, based on the
    *member_type* setting. If no *member_type* setting is provided, then the
    *member_min* test will not occur. For the boundable types
    (strings and collections), the *member_min* setting will test that the
    value length is not less than the minimum. For the comparable types
    (numeric and chronological) the *member_min* setting will test
that the value is not less than the minimum.
*member_max*
The *member_max* setting has differing behavior, based on the
    *member_type* setting. If no *member_type* setting is provided,
    then the *member_max* test will not occur. For the boundable types
(strings and collections), the *member_max* setting will test that the
value length is not more than the maximum. For the comparable types
(numeric and chronological) the *member_max* setting will test
that the value is not more than the maximum.
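    Tying the member settings together, an illustrative sketch (arbitrary
    names and values):
    >>> sizes_schema = PropertySchema(type='list', member_type='int',
    ...                               member_min=1)
    >>> validate_value('sizes', sizes_schema, [3, 2])
    []
    >>> validate_value('sizes', sizes_schema, [3, 0])
    ['The value of "0" for "sizes" fails min size of 1.']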
"""
from copy import copy, deepcopy
from datetime import date, datetime, time
import re
from ontic.validation_exception import ValidationException
# : The set of supported collection types.
COLLECTION_TYPES = {dict, list, set}
# : The set of types that can be compared with inequality operators.
COMPARABLE_TYPES = {complex, date, datetime, float, int, long, time}
# : The set of types that may be limited in size.
BOUNDABLE_TYPES = {basestring, str, unicode, list, dict, set}
# : The set of string types
STRING_TYPES = {basestring, str, unicode}
# : Used to convert the string declaration of attribute type to native type.
TYPE_MAP = {
'basestring': basestring,
basestring: basestring,
'bool': bool,
bool: bool,
'complex': complex,
complex: complex,
'date': date,
date: date,
'datetime': datetime,
datetime: datetime,
'dict': dict,
dict: dict,
'float': float,
float: float,
'int': int,
int: int,
'list': list,
list: list,
'long': long,
long: long,
'None': None,
None: None,
'set': set,
set: set,
'str': str,
str: str,
'time': time,
time: time,
'unicode': unicode,
unicode: unicode,
}
class CoreType(dict):
"""The root type of *Ontic* types.
**CoreType** ensures that *Ontic* object properties can be accessed by
either dict key or object attribute. For example::
>>> some_object = CoreType({'key1': 'value1'})
>>> assert some_object.key1 == 'value1'
>>> assert some_object['key1'] == 'value1'
>>> some_object.key2 = 'value2'
>>> assert some_object['key2'] == 'value2'
>>> some_object['key3'] = 'value3'
>>> assert some_object.key3 == 'value3'
"""
def __init__(self, *args, **kwargs):
r"""**CoreType** initialized as a `dict` type.
Initializes the accessor behavior to allow for property access as
dict key or object attribute.
Dict Style Initialization
CoreType() -> new empty CoreType
CoreType(mapping) -> new CoreType initialized from a mapping
object's (key, value) pairs
CoreType(iterable) -> new CoreType initialized as if via::
d = CoreType()
for k, v in iterable:
d[k] = v
CoreType(\*\*kwargs) -> new CoreType initialized with the
name=value pairs in the keyword argument list. For example::
CoreType(one=1, two=2)
"""
super(CoreType, self).__init__(*args, **kwargs)
self.__dict__ = self
def __copy__(self):
return type(self)(copy(dict(self)))
def __deepcopy__(self, memo):
the_copy = dict(self.__dict__)
return type(self)(deepcopy(the_copy, memo))
class MetaType(CoreType):
r"""Interface for type definition of **Ontic** schema defined classes.
Dict Style Initialization
MetaType() -> new empty MetaType
MetaType(mapping) -> new MetaType initialized from a mapping object's
(key, value) pairs
MetaType(iterable) -> new MetaType initialized as if via::
d = MetaType()
for k, v in iterable:
d[k] = v
MetaType(\*\*kwargs) -> new MetaType initialized with the name=value
pairs in the keyword argument list. For example::
MetaType(one=1, two=2)
"""
# : The Ontic schema pointer.
ONTIC_SCHEMA = None
@classmethod
def get_schema(cls):
"""Returns the schema object for the a given type definition.
:return: The schema metadata definition for a :class:`PropertySchema`
or a :class:`ontic.ontic_type.OnticType` derived child class.
:rtype: :class:`CoreType`, :class:`ontic.schema_type.SchemaType`
"""
return cls.ONTIC_SCHEMA
class PropertySchema(MetaType):
"""The object type for representing Property schema definitions.
The PropertySchema class is used to define individual properties of an
object. For the complete set of property schema settings to define a
property, see :ref:`property-schema-settings-table`
Examples::
There are a number of ways to create a PropertySchema for use in
        validation of a property. The most straightforward is to define
a property schema with a dictionary.
>>> foo_schema = PropertySchema({
... 'type': 'str',
... 'required': True,
... 'default': 'Undefined',
... })
        PropertySchema also supports the full range of dict style instantiation.
>>> boo_schema = PropertySchema([('type','str'),('required',True)])
>>> moo_schema = PropertySchema(type='str', default='Cow')
        PropertySchema can also be assembled programmatically.
>>> bar_schema = PropertySchema()
>>> bar_schema.type = 'int'
>>> bar_schema.required = False
>>> bar_schema.min = 3
>>> val_errors = validate_property_schema(bar_schema)
>>> assert val_errors == []
>>> nutty_schema = PropertySchema()
>>> nutty_schema['type'] = 'str'
>>> nutty_schema['required'] = True
>>> nutty_schema['min'] = 5
>>> val_errors = validate_property_schema(nutty_schema)
>>> assert val_errors == []
"""
# The schema definition for the **PropertySchema** type.
ONTIC_SCHEMA = CoreType({
'type': MetaType({
'type': (basestring, str, unicode, type),
'default': None,
'required': False,
'enum': set(TYPE_MAP.keys()),
'min': None,
'max': None,
'regex': None,
'member_type': None,
'member_min': None,
'member_max': None,
}),
'default': MetaType({
'type': None,
'default': None,
'required': False,
'enum': None,
'min': None,
'max': None,
'regex': None,
'member_type': None,
'member_min': None,
'member_max': None,
}),
'required': MetaType({
'type': bool,
'default': False,
'required': False,
'enum': None,
'min': None,
'max': None,
'regex': None,
'member_type': None,
'member_min': None,
'member_max': None,
}),
'enum': MetaType({
'type': set,
'default': None,
'required': False,
'enum': None,
'min': None,
'max': None,
'regex': None,
'member_type': None,
'member_min': None,
'member_max': None,
}),
'min': MetaType({
'type': tuple(COMPARABLE_TYPES),
'default': None,
'required': False,
'enum': None,
'min': None,
'max': None,
'regex': None,
'member_type': None,
'member_min': None,
'member_max': None,
}),
'max': MetaType({
'type': tuple(COMPARABLE_TYPES),
'default': None,
'required': False,
'enum': None,
'min': None,
'max': None,
'regex': None,
'member_type': None,
'member_min': None,
'member_max': None,
}),
'regex': MetaType({
'type': (basestring, str, unicode),
'default': None,
'required': False,
'enum': None,
'min': 1,
'max': None,
'regex': None,
'member_type': None,
'member_min': None,
'member_max': None,
}),
'member_type': MetaType({
'type': (basestring, str, unicode, type),
'default': None,
'required': False,
'enum': set(TYPE_MAP.keys()),
'min': None,
'max': None,
'regex': None,
'member_type': None,
'member_min': None,
'member_max': None,
}),
'member_min': MetaType({
'type': tuple(COMPARABLE_TYPES),
'default': None,
'required': False,
'enum': None,
'min': None,
'max': None,
'regex': None,
'member_type': None,
'member_min': None,
'member_max': None,
}),
'member_max': MetaType({
'type': tuple(COMPARABLE_TYPES),
'default': None,
'required': False,
'enum': None,
'min': None,
'max': None,
'regex': None,
'member_type': None,
'member_min': None,
'member_max': None,
}),
})
def __init__(self, *args, **kwargs):
r"""Initializes in accordance with dict specification.
        PropertySchema initialization can be done with a dict object or with
        None. A PropertySchema defined with None is legal and valid. It is
        therefore possible to define a property with no restrictions on
        assignment or requirement.
Dict Style Initialization
*PropertySchema* supports dict style initialization.
PropertySchema() -> new empty PropertySchema
PropertySchema(mapping) -> new PropertySchema initialized from a
mapping object's (key, value) pairs
PropertySchema(iterable) -> new PropertySchema initialized as if
via::
d = PropertySchema()
for k, v in iterable:
d[k] = v
PropertySchema(\*\*kwargs) -> new PropertySchema initialized with
the name=value pairs in the keyword argument list. For example::
PropertySchema(one=1, two=2)
"""
super(PropertySchema, self).__init__(*args, **kwargs)
perfect_property_schema(self)
validate_property_schema(self)
def validate_property_schema(candidate_property_schema,
raise_validation_exception=True):
"""Method to validate a property schema definition.
:param candidate_property_schema: The schema property to be validated.
:type candidate_property_schema: :class:`PropertySchema`
:param raise_validation_exception: If True, then *validate_property_schema*
        will throw a *ValidationException* upon validation failure. If False,
        then a list of validation errors is returned. Defaults to True.
    :type raise_validation_exception: bool
    :return: If no validation errors are found, then an empty list is
        returned. If validation fails and *raise_validation_exception* is
        False, then the list of errors is returned.
    :rtype: list<str>
    :raises ValueError: *candidate_property_schema* is None or is not of
        type :class:`PropertySchema`.
:raises ValidationException: A property of *candidate_property_schema*
does not meet schema requirements.
"""
if candidate_property_schema is None:
raise ValueError('"candidate_property_schema" must be provided.')
if not isinstance(candidate_property_schema, PropertySchema):
raise ValueError(
'"candidate_property_schema" must be PropertySchema type.')
value_errors = list()
for schema_name, schema_setting in (
candidate_property_schema.get_schema().iteritems()):
setting_value = candidate_property_schema.get(schema_name, None)
value_errors.extend(
validate_value(schema_name, schema_setting, setting_value))
if value_errors and raise_validation_exception:
raise ValidationException(value_errors)
return value_errors
def perfect_property_schema(candidate_property_schema):
"""Method to ensure the completeness of a given schema property.
This method ensures completeness by stripping out any properties that
are not defined by the schema definition. In addition, for any schema
    properties that are not included, the method will add those
    properties, set to their default values.
:param candidate_property_schema: The PropertySchema that is to be
        cleaned and restricted.
:type candidate_property_schema: :class:`PropertySchema`
:rtype: None
:raises ValueError: If the candidate_property_schema is None, or not
of type *PropertySchema*.
"""
if candidate_property_schema is None:
raise ValueError('"candidate_property_schema" must be provided.')
if not isinstance(candidate_property_schema, PropertySchema):
raise ValueError(
'"candidate_property_schema" must be PropertySchema type.')
schema_property_schema = candidate_property_schema.get_schema()
    # remove unnecessary properties.
extra_properties = set(candidate_property_schema.keys()) - set(
schema_property_schema.keys())
for property_name in extra_properties:
del candidate_property_schema[property_name]
if 'type' in candidate_property_schema:
# ensure that the type declaration is valid
if candidate_property_schema.type not in TYPE_MAP:
raise ValueError('Illegal type declaration: %s' %
candidate_property_schema.type)
# coerce type declarations as string to base types.
candidate_property_schema.type = TYPE_MAP.get(
candidate_property_schema.type, None)
else:
candidate_property_schema.type = None
if 'member_type' in candidate_property_schema:
# coerce member_type declarations as string to base types.
candidate_property_schema.member_type = TYPE_MAP[
candidate_property_schema.member_type]
else:
candidate_property_schema.member_type = None
for property_name, property_schema in (
schema_property_schema.iteritems()):
if property_name not in candidate_property_schema:
candidate_property_schema[
property_name] = property_schema.default
continue
if not candidate_property_schema[property_name]:
candidate_property_schema[property_name] = property_schema.default
def validate_value(name, property_schema, value):
"""Method to validate a given value against a given property schema.
:param name: The name of the value to be validated.
:type name: str
:param property_schema: The property schema that contains the validation
rules.
:type property_schema: :class:`PropertySchema`
:param value: The value that is to be validated.
:type value: object
:return: A list that is utilized to collect the errors found
during schema validation.
:rtype: list<str>
"""
value_errors = []
# required: True | False
if property_schema.required and value is None:
value_errors.append('The value for "%s" is required.' % name)
return value_errors # No other validation can occur without a value
if value is not None:
validate_non_none_value(name, property_schema, value, value_errors)
return value_errors
def validate_non_none_value(key, property_schema, value, value_errors):
"""Validates an **Ontic** object value that is not None.
This method validates singular and collection values. This method
does not perform *Required* validation, as it is assumed that the
value is not None.
:param key: The name of the property to be validated.
:type key: str
:param property_schema: The property schema to utilize for validation.
:type property_schema: :class:`PropertySchema`
:param value: The non-None value to be validated.
:type value: object
:param value_errors: A list of errors found for a given value. If any
        given validator method fails, it will append its error message to
the value_errors list.
:type value_errors: list<str>
:rtype: None
"""
if not property_schema.type:
# if no schema_type, then just check that
# the value is in an enum if necessary.
if not enum_validation(property_schema, value):
value_errors.append(
'The value "%s" for "%s" not in enumeration %s.' %
(value, key, list(property_schema.enum)))
return # No further processing can occur
else:
# type checking
if not isinstance(value, property_schema.type):
value_errors.append(
'The value for "%s" is not of type "%s": %s' %
(key, property_schema.type, str(value)))
            # If not of the expected type, then we can't validate
            # further without errors.
return
if property_schema.type in COLLECTION_TYPES:
validate_collection_members(
key, property_schema, value, value_errors)
else:
non_none_singular_validation(
key, property_schema, value, value_errors)
def validate_collection_members(key, property_schema, value, value_errors):
"""Method to validate the members of a collection.
This method only operates on *list* and *set* collection types.
:param key: The name of the collection property to validate.
:type key: str
:param property_schema: The property schema to utilize for validation.
:type property_schema: :class:`PropertySchema`
:param value: The collection whose members will be validated.
:type value: list, set
:param value_errors: A list of errors found for a given collection.
If any members fail validation, the error condition will be
        listed in the value_errors list.
:type value_errors: list<str>
:rtype: None
"""
if not min_validation(property_schema, value):
value_errors.append('The value of "%s" for "%s" fails min of %s.' %
(value, key, property_schema.min))
if not max_validation(property_schema, value):
value_errors.append('The value of "%s" for "%s" fails max of %s.' %
(value, key, property_schema.max))
if property_schema.type in {list, set}:
validators = list()
if property_schema.enum:
validators.append(validate_member_enum)
if property_schema.member_type:
validators.append(validate_member_type)
if property_schema.regex and property_schema.member_type == str:
validators.append(validate_member_regex)
if property_schema.member_min:
validators.append(validate_member_min)
if property_schema.member_max:
validators.append(validate_member_max)
for member_value in value:
execute_collection_validators(
key,
member_value,
property_schema,
validators,
value_errors)
def execute_collection_validators(
key,
member_value,
property_schema,
validators,
value_errors):
"""Method to execute a list of validators on a given collection.
:param key: The name of the collection property to validate.
:type key: str
:param member_value: The member of the collection property to validate.
:type member_value: str, int, float, date, datetime, time
:param property_schema: The property schema to utilize for validation.
:type property_schema: :class:`PropertySchema`
:param validators: A list of validation methods to execute.
:type validators: list<types.MethodType>
:param value_errors: A list of errors found for a given value. If any
        given validator method fails, it will append its error message to
the value_errors list.
:type value_errors: list<str>
:rtype: None
"""
for validator in validators:
validator(key, member_value, property_schema, value_errors)
def validate_member_enum(key, member_value, property_schema, value_errors):
"""Validate a member of a collection is within a defined enumeration.
:param key: The name of the collection property to validate.
:type key: str
:param member_value: The member of the collection property to
validate.
:type member_value: str, int, float, date, datetime, time
:param property_schema: The property schema to utilize for validation.
:type property_schema: :class:`PropertySchema`
:param value_errors: A list of errors found for a given value. If the
        validation fails, then an error message is added to the
value_errors list.
:type value_errors: list<str>
:rtype: None
"""
if not enum_validation(property_schema, member_value):
value_errors.append(
'The value "%s" for "%s" not in enumeration %s.' %
(member_value, key, sorted(list(property_schema.enum))))
def validate_member_type(key, member_value, property_schema, value_errors):
"""Validate a member of a collection is of a given type.
:param key: The name of the collection property to validate.
:type key: str
:param member_value: The member value of the collection property to
validate.
:type member_value: object
:param property_schema: The property schema to utilize for validation.
:type property_schema: :class:`PropertySchema`
:param value_errors: A list of errors found for a given value. If the
validation fails, then an error message is added to the
value_errors list.
:type value_errors: list<str>
:rtype: None
"""
if not isinstance(member_value, property_schema.member_type):
value_errors.append(
'The value "%s" for "%s" is not of type "%s".' %
(str(member_value), key, property_schema.member_type))
def validate_member_regex(key, member_value, property_schema, value_errors):
"""Validate a member of a collection against a defined regex.
:param key: The name of the collection property to validate.
:type key: str
:param member_value: The member value of the collection property to
validate.
:type member_value: str
:param property_schema: The property schema to utilize for validation.
:type property_schema: :class:`PropertySchema`
:param value_errors: A list of errors found for a given value. If the
validation fails, then an error message is added to the
value_errors list.
:type value_errors: list<str>
:rtype: None
"""
if not re.match(property_schema.regex, member_value):
value_errors.append(
'Value "%s" for "%s" does not meet regex: %s' %
(member_value, key, property_schema.regex))
def validate_member_min(key, member_value, property_schema, value_errors):
"""Validate a member of a collection for minimum allowable value.
:param key: The name of the collection property to validate.
:type key: str
:param member_value: The member value of the collection property to
validate.
:type member_value: str, int, float, date, datetime, time
:param property_schema: The property schema to utilize for validation.
:type property_schema: :class:`PropertySchema`
:param value_errors: A list of errors found for a given value. If the
validation fails, then an error message is added to the
value_errors list.
:type value_errors: list<str>
:rtype: None
"""
if property_schema.member_type in STRING_TYPES:
if len(member_value) < property_schema.member_min:
value_errors.append(
'The value of "%s" for "%s" fails min length of %s.' %
(member_value, key, property_schema.member_min))
if property_schema.member_type in COMPARABLE_TYPES:
if member_value < property_schema.member_min:
value_errors.append(
'The value of "%s" for "%s" fails min size of %s.' %
(member_value, key, property_schema.member_min))
def validate_member_max(key, member_value, property_schema, value_errors):
"""Validate a member of a collection for maximum allowable value.
:param key: The name of the collection property to validate.
:type key: str
:param member_value: The member value of the collection property to
validate.
:type member_value: str, int, float, date, datetime, time
:param property_schema: The property schema to utilize for validation.
:type property_schema: :class:`PropertySchema`
:param value_errors: A list of errors found for a given value. If the
validation fails, then an error message is added to the
value_errors list.
:type value_errors: list<str>
:rtype: None
"""
if property_schema.member_type in STRING_TYPES:
if len(member_value) > property_schema.member_max:
value_errors.append(
'The value of "%s" for "%s" fails max length of %s.' %
(member_value, key, property_schema.member_max))
if property_schema.member_type in COMPARABLE_TYPES:
if member_value > property_schema.member_max:
value_errors.append(
'The value of "%s" for "%s" fails max size of %s.' %
(member_value, key, property_schema.member_max))
def enum_validation(property_schema, value):
"""Validate a non-collection property for value in an enumeration set.
:param property_schema: The property schema to utilize for validation.
:type property_schema: :class:`PropertySchema`
:param value: The value of the property to be validated.
:type value: object
:return: True if the validation is successful, else False.
:rtype: bool
"""
if property_schema.enum:
        if value not in property_schema.enum:
return False
return True
def min_validation(property_schema, value):
"""Validate a non-collection property for minimum allowable value.
:param property_schema: The property schema to utilize for validation.
:type property_schema: :class:`PropertySchema`
:param value: The value of the property to be validated.
:type value: str, int, float, date, datetime, time, dict, list, set
:return: True if the validation is successful, else False.
:rtype: bool
"""
if property_schema.min:
if property_schema.type in BOUNDABLE_TYPES:
if len(value) < property_schema.min:
return False
if property_schema.type in COMPARABLE_TYPES:
if value < property_schema.min:
return False
return True
def max_validation(property_schema, value):
"""Validates a non-collection property for maximum allowable value.
:param property_schema: The property schema to utilize for validation.
:type property_schema: :class:`PropertySchema`
:param value: The value of the property to be validated.
:type value: str, int, float, date, datetime, time, dict, list, set
:return: True if the validation is successful, else False.
:rtype: bool
"""
if property_schema.max:
if property_schema.type in BOUNDABLE_TYPES:
if len(value) > property_schema.max:
return False
if property_schema.type in COMPARABLE_TYPES:
if value > property_schema.max:
return False
return True
def non_none_singular_validation(key, property_schema, value, value_errors):
"""Method to validate an object value meets schema requirements.
This method validates non-collection properties. The method should
only be used for non-None values.
:param key: The name of the property that is being validated.
:type key: str
:param property_schema: The schema definition for the target property.
:type property_schema: :class:`PropertySchema`
:param value: The value to be tested against the given schema.
:type value: str, int, float, date, datetime, time, dict, list, set
:param value_errors: A list of the validation errors discovered. The
value errors will be added to if the given value fails validation.
:type value_errors: list<str>
:rtype: None
"""
# enum
if not enum_validation(property_schema, value):
value_errors.append('The value "%s" for "%s" not in enumeration %s.' %
(value, key, list(property_schema.enum)))
# min
if not min_validation(property_schema, value):
value_errors.append('The value of "%s" for "%s" fails min of %s.' %
(value, key, property_schema.min))
# max
if not max_validation(property_schema, value):
value_errors.append('The value of "%s" for "%s" fails max of %s.' %
(value, key, property_schema.max))
# regex validation
if property_schema.regex:
        if property_schema.type in STRING_TYPES and value != '':
if not re.match(property_schema.regex, value):
value_errors.append(
'Value "%s" for %s does not meet regex: %s' %
(value, key, property_schema.regex))
| null |
ontic/meta_type.py
|
meta_type.py
|
py
| 39,107 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.date",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "datetime.time",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "datetime.date",
"line_number": 257,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 259,
"usage_type": "name"
},
{
"api_name": "datetime.time",
"line_number": 277,
"usage_type": "name"
},
{
"api_name": "datetime.date",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 258,
"usage_type": "name"
},
{
"api_name": "datetime.time",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "copy.copy",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "ontic.validation_exception.ValidationException",
"line_number": 601,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 868,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 1025,
"usage_type": "call"
}
] |
287047384
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 14 20:49:43 2020
@author: alres
"""
import sklearn
import re
from praw.models import MoreComments
import itertools
from bs4 import BeautifulSoup
import nltk
#nltk.download('all')
from nltk.corpus import stopwords
import flask
import pandas as pd
from flask import Flask,request,render_template,url_for,send_file
import json
import os
import praw
from Flair_detector import lr
reddit=praw.Reddit(client_id='acP3HQrR6pmNDQ',client_secret='9Ii4t82UOtuwc9ADN4Dv_j3hL6E',user_agent='Flair_detector',username='OmegaDeathOmega',password='omegaDeath_')
REPLACE_BY_SPACE = re.compile('[/(){}\[\]\|@,;]')
BAD_SYMBOLS = re.compile('[^0-9a-z #+_]')
STOPWORDS = set(stopwords.words('english'))
def clean_text(text):
    """Strip HTML markup, lower-case the text, and remove bad symbols and stopwords."""
text = BeautifulSoup(text, "lxml").text
text = text.lower()
text = REPLACE_BY_SPACE.sub(' ', text)
text = BAD_SYMBOLS.sub('', text)
text = ' '.join(word for word in text.split() if word not in STOPWORDS)
print(text)
return text
global urls
urls=[]
data=[]
# app
app = flask.Flask(__name__,template_folder='templates')
# routes
@app.route('/',methods=['GET','POST'])
def predict():
if flask.request.method=='GET':
return flask.render_template('home.html')
if flask.request.method=='POST':
val = flask.request.form['url']
submission = reddit.submission(url=val)
body=submission.selftext
title=submission.title
comments=[]
for com in submission.comments:
if isinstance(com, MoreComments):
continue
comments.append(com.body)
post_text=title+' '+body+' '+' '.join(comments)
post_text=clean_text(post_text)
df=[post_text]
return flask.render_template("result.html",prediction=lr.predict(df))
dictionary={}
@app.route('/automated_testing',methods=['GET','POST'])
def automated_testing():
if flask.request.method=='GET':
return flask.render_template('home.html')
if flask.request.method=='POST':
if 'file' not in flask.request.files:
print('File not uploaded')
            # return an explicit error response instead of None
            return 'File not uploaded', 400
file=flask.request.files['file']
file.save(os.path.join(file.filename))
fp=open(file.filename,"r")
for line in fp:
strip_line=line.strip()
urls.append(strip_line)
return flask.render_template("downloads.html")
@app.route('/return-file')
def return_file():
for line in urls:
submission = reddit.submission(url=line)
body=submission.selftext
title=submission.title
comments=[]
for com in submission.comments:
if isinstance(com, MoreComments):
continue
comments.append(com.body)
post_text=title+' '+body+' '+' '.join(comments)
post_text=clean_text(post_text)
data.append(post_text)
lr_pred=lr.predict(data)
for (i,j) in itertools.zip_longest(urls,lr_pred):
dictionary[i]=j
with open('pred.json','w') as fp:
json.dump(dictionary,fp)
return send_file('pred.json')
if __name__ == '__main__':
app.run(port = 8000, debug=True)
| null |
Flair_Detector_Model/app.py
|
app.py
|
py
| 3,596 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "praw.Reddit",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "praw.models.MoreComments",
"line_number": 65,
"usage_type": "argument"
},
{
"api_name": "flask.render_template",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "Flair_detector.lr.predict",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "Flair_detector.lr",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "flask.request",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "praw.models.MoreComments",
"line_number": 109,
"usage_type": "argument"
},
{
"api_name": "Flair_detector.lr.predict",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "Flair_detector.lr",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "itertools.zip_longest",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "flask.send_file",
"line_number": 127,
"usage_type": "call"
}
] |
402501435
|
#!/usr/bin/env python3
# -------------------------------
# projects/netflix/Testnetflix.py
# Copyright (C) 2014
# Glenn P. Downing
# -------------------------------
# -------
# imports
# -------
from io import StringIO
from unittest import main, TestCase
from Netflix import netflix_eval_movieID, netflix_solve
# -----------
# Testnetflix
# -----------
class TestNetflix (TestCase) :
# ----
# eval
# ----
def test_eval_1 (self) :
netflix_eval_movieID_closure=netflix_eval_movieID("2043")
prediction=netflix_eval_movieID_closure("1417435")
self.assertEqual(prediction, 4.2)
def test_eval_2 (self) :
netflix_eval_movieID_closure=netflix_eval_movieID("1001")
prediction=netflix_eval_movieID_closure("780049")
self.assertEqual(prediction, 3.9)
def test_eval_3 (self) :
netflix_eval_movieID_closure=netflix_eval_movieID("10019")
prediction=netflix_eval_movieID_closure("1041111")
self.assertEqual(prediction, 3.7)
# -----
# solve
# -----
def test_solve_1 (self) :
r = StringIO("2043:\n1417435\n2312054\n462685\n")
w = StringIO()
netflix_solve(r, w)
self.assertEqual(w.getvalue(), "2043:\n4.2\n3.8\n4.2\nRMSE: 1.76\n")
def test_solve_2 (self) :
r = StringIO("10020:\n431122\n251807\n47787\n")
w = StringIO()
netflix_solve(r, w)
self.assertEqual(w.getvalue(), "10020:\n3.5\n3.7\n3.9\nRMSE: 0.61\n")
def test_solve_3 (self) :
r = StringIO("5318:\n2234029\n1487546\n1990010\n")
w = StringIO()
netflix_solve(r, w)
self.assertEqual(w.getvalue(), "5318:\n3.8\n3.2\n3.7\nRMSE: 0.88\n")
# ----
# main
# ----
if __name__ == "__main__" :
main()
"""
% coverage3 run --branch TestNetflix.py > TestNetflix.out 2>&1
% coverage3 report -m >> TestNetflix.out
% cat TestNetflix.out
.......
----------------------------------------------------------------------
Ran 7 tests in 0.001s
OK
Name Stmts Miss Branch BrMiss Cover Missing
---------------------------------------------------------
netflix 18 0 6 0 100%
Testnetflix 33 1 2 1 94% 79
---------------------------------------------------------
TOTAL 51 1 8 1 97%
"""
| null |
ch34698-TestNetflix.py
|
ch34698-TestNetflix.py
|
py
| 2,364 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "Netflix.netflix_eval_movieID",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "Netflix.netflix_eval_movieID",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "Netflix.netflix_eval_movieID",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "Netflix.netflix_solve",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "Netflix.netflix_solve",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "Netflix.netflix_solve",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "unittest.main",
"line_number": 75,
"usage_type": "call"
}
] |
415420604
|
#!/usr/bin/python
import os.path
from flask import flash, redirect, render_template, send_from_directory, url_for, request, session, abort
from pyipam.server import app
from pyipam.models.main import MainModel
from pyipam.models.subnets import SubnetsModel
# Initalise models
model = MainModel()
subnetsModel = SubnetsModel()
# Publish the public resources, CSS etc.
@app.route('/res/<path:path>')
def send_res(path):
return send_from_directory('static', path)
@app.route('/')
def home():
filename = os.path.join(os.path.dirname(__file__), '..', 'config', 'main.ini')
subnets = subnetsModel.load_subnets()
# Redirect to setup page if conf doesn't exist
    if not os.path.isfile(filename):
return redirect('/setup')
else:
return render_template(
'main/home.html',
subnets=subnets
)
@app.route('/setup', methods=['GET', 'POST'])
def setup():
if request.method == 'GET':
return render_template(
'main/setup.html', **locals()
)
elif request.method == 'POST':
fields={
'host': request.form['host'],
'database': request.form['database'],
'user': request.form['user'],
'password': request.form['password']
}
model.setup_app(fields)
return redirect('/')
@app.route('/about')
def about():
return render_template(
'main/about.html', **locals()
)
| null |
pyipam/controllers/main.py
|
main.py
|
py
| 1,451 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pyipam.models.main.MainModel",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pyipam.models.subnets.SubnetsModel",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.send_from_directory",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pyipam.server.app.route",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pyipam.server.app",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "os.path.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "os.path.path.dirname",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.path.isfile",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pyipam.server.app.route",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pyipam.server.app",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pyipam.server.app.route",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pyipam.server.app",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pyipam.server.app.route",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pyipam.server.app",
"line_number": 47,
"usage_type": "name"
}
] |
146204285
|
#!/usr/bin/env python3
from __future__ import division
from builtins import range
from past.utils import old_div
import os
import sys
import glob
import shutil
import ntpath
import pickle
import datetime
import argparse
import numpy as np
import numpy.matlib
from xml.etree.ElementTree import ElementTree
from subprocess import check_call
import isce
import isceobj
from imageMath import IML
SCR_PATH = os.path.abspath(os.path.dirname(__file__))
BIN_PATH = os.path.join(os.path.dirname(SCR_PATH), "src")
def runCmd(cmd):
print("{}".format(cmd))
#status = os.system(cmd)
status = check_call(cmd, shell=True)
if status != 0:
raise Exception('error when running:\n{}\n'.format(cmd))
def getWidth(xmlfile):
xmlfp = None
try:
xmlfp = open(xmlfile,'r')
print('reading file width from: {0}'.format(xmlfile))
xmlx = ElementTree(file=xmlfp).getroot()
tmp = xmlx.find("component[@name='coordinate1']/property[@name='size']/value")
        if tmp is None:
tmp = xmlx.find("component[@name='Coordinate1']/property[@name='size']/value")
width = int(tmp.text)
print("file width: {0}".format(width))
except (IOError, OSError) as strerr:
print("IOError: %s" % strerr)
return []
finally:
if xmlfp is not None:
xmlfp.close()
return width
def getLength(xmlfile):
xmlfp = None
try:
xmlfp = open(xmlfile,'r')
print('reading file length from: {0}'.format(xmlfile))
xmlx = ElementTree(file=xmlfp).getroot()
tmp = xmlx.find("component[@name='coordinate2']/property[@name='size']/value")
        if tmp is None:
tmp = xmlx.find("component[@name='Coordinate2']/property[@name='size']/value")
length = int(tmp.text)
print("file length: {0}".format(length))
except (IOError, OSError) as strerr:
print("IOError: %s" % strerr)
return []
finally:
if xmlfp is not None:
xmlfp.close()
return length
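# Minimal usage sketch (hypothetical file name) of the two XML readers above:
#   width = getWidth('burst_01.slc.xml')
#   length = getLength('burst_01.slc.xml')
# Each returns the integer size parsed from the ISCE image XML, or [] if the file cannot be read.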
def create_xml(fileName, width, length, fileType):
if fileType == 'slc':
image = isceobj.createSlcImage()
elif fileType == 'int':
image = isceobj.createIntImage()
elif fileType == 'amp':
image = isceobj.createAmpImage()
elif fileType == 'rmg':
image = isceobj.Image.createUnwImage()
elif fileType == 'float':
image = isceobj.createImage()
image.setDataType('FLOAT')
image.setFilename(fileName)
image.setWidth(width)
image.setLength(length)
image.setAccessMode('read')
#image.createImage()
image.renderVRT()
image.renderHdr()
#image.finalizeImage()
def post_process_lon_max(lon_looked_data):
# find min max:
lon_max = np.nanmax(lon_looked_data)
lon_min = np.nanmin(lon_looked_data)
bounds = [(lon_min,lon_max)]
if (lon_max - lon_min) > 320:
# we conclude that the anti-meridian has been crossed
# here we put NaNs in the interpolated area between +180/-180 boundary
lon_looked_data_temp = lon_looked_data.copy()
lon_grad_lr = np.gradient(lon_looked_data, axis=1)
lon_grad_rl = np.gradient(np.fliplr(lon_looked_data), axis=1)
track_dirn = None
if np.amin(lon_looked_data[:, 0]) > 0 and np.amin(lon_looked_data[:, -1]) < 0:
# if AP on left, NA on right, asc
track_dirn = 'A'
else:
# if NA on left, AP on right, dsc
track_dirn = 'D'
for i in range(len(lon_grad_lr)):
lr_row = lon_grad_lr[i, :]
rl_row = lon_grad_rl[i, :]
# import pdb; pdb.set_trace()
if track_dirn == 'A':
lr_ind = int(np.argwhere(lr_row < 0)[0]) # get first occurrence of drop +180 to -180
rl_ind = int(np.argwhere(rl_row > 0)[0]) # get first occurrence of rise -180 to +180
else:
lr_ind = int(np.argwhere(lr_row > 0)[0]) # get first occurrence of rise -180 to +180
rl_ind = int(np.argwhere(rl_row < 0)[0]) # get first occurrence of drop +180 to -180
# print(f"for row {i} lr_ind: {lr_ind}, rl_ind:{rl_ind}")
lon_looked_data_temp[i, lr_ind + 1:-rl_ind + 1] = np.nan
# get the AP limit
AP_West = np.nanmin(np.where(lon_looked_data_temp > 0, lon_looked_data_temp, np.inf))
AP_East = np.nanmax(np.where(lon_looked_data_temp > 0, lon_looked_data_temp, -np.inf))
NA_West = np.nanmin(np.where(lon_looked_data_temp < 0, lon_looked_data_temp, np.inf))
NA_East = np.nanmax(np.where(lon_looked_data_temp < 0, lon_looked_data_temp, -np.inf))
bounds = [(AP_West, AP_East), (NA_West, NA_East)]
return bounds
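# Illustrative sketch (synthetic data assumed) of how the returned bounds are consumed in __main__ below:
#   bounds = post_process_lon_max(lon_looked_data)
#   if len(bounds) == 1:   # normal scene: a single (lon_min, lon_max) pair
#       (lon_min, lon_max), = bounds
#   else:                  # anti-meridian crossing: a positive-longitude (AP) pair and a negative-longitude (NA) pair
#       (ap_west, ap_east), (na_west, na_east) = bounds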
def cmdLineParse():
"""
Command line parser.
"""
parser = argparse.ArgumentParser( description='log ratio')
parser.add_argument('-mdir', dest='mdir', type=str, required=True, help='master directory containing the bursts')
parser.add_argument('-sdir', dest='sdir', type=str, required=True, help='slave directory containing the bursts')
parser.add_argument('-gdir', dest='gdir', type=str, required=True,
help='geometric directory containing the lat/lon files')
parser.add_argument('-rlks', dest='rlks', type=int, default=0, help='number of range looks')
parser.add_argument('-alks', dest='alks', type=int, default=0, help='number of azimuth looks')
parser.add_argument('-ssize', dest='ssize', type=float, default=1.0,
help='output geocoded sample size. default: 1.0 arcsec')
return parser.parse_args()
if __name__ == '__main__':
SCR_DIR = SCR_PATH
inps = cmdLineParse()
mbursts = sorted(glob.glob(os.path.join(inps.mdir, 'burst_*.slc')))
sbursts = sorted(glob.glob(os.path.join(inps.sdir, 'burst_*.slc')))
nmb = len(mbursts) #number of master bursts
nsb = len(sbursts) #number of slave bursts
lats = sorted(glob.glob(os.path.join(inps.gdir, 'lat_*.rdr')))
lons = sorted(glob.glob(os.path.join(inps.gdir, 'lon_*.rdr')))
nb = nmb
for i in range(nb):
print('+++++++++++++++++++++++++++++++++++')
print('processing burst {} of {}'.format(i+1, nb))
print('+++++++++++++++++++++++++++++++++++')
# find slave burst here
master_burst = ntpath.basename(mbursts[i])
slave_burst_id = -1
for ii in range(nsb):
slave_burst = ntpath.basename(sbursts[ii])
if slave_burst == master_burst:
slave_burst_id = ii
break
if slave_burst_id == -1:
print('no matching slave burst found, skip this burst')
continue
amp = 'amp_%02d.amp' % (i+1)
# cmd = "imageMath.py -e='(real(a)!=0)*(real(b)!=0)*(imag(a)!=0)*(imag(b)!=0)*sqrt(real(a)*real(a)+imag(a)*imag(a));(real(a)!=0)*(real(b)!=0)*(imag(a)!=0)*(imag(b)!=0)*sqrt(real(b)*real(b)+imag(b)*imag(b))' --a={} --b={} -o {} -t float -s BIP".format(
# mbursts[i],
# sbursts[i],
# amp)
# runCmd(cmd)
width = getWidth(mbursts[i] + '.xml')
length = getLength(mbursts[i] + '.xml')
width_looked = int(old_div(width,inps.rlks))
length_looked = int(old_div(length,inps.alks))
master = np.fromfile(mbursts[i], dtype=np.complex64).reshape(length, width)
slave = np.fromfile(sbursts[slave_burst_id], dtype=np.complex64).reshape(length, width)
        amp_data = np.zeros((length, width*2), dtype=float)  # np.float is deprecated; builtin float (float64) keeps the original behaviour
amp_data[:, 0:width * 2:2] = np.absolute(master) * (np.absolute(slave) != 0)
amp_data[:, 1:width * 2:2] = np.absolute(slave) * (np.absolute(master) != 0)
amp_data.astype(np.float32).tofile(amp)
create_xml(amp, width, length, 'amp')
amp_looked = 'amp_%02d_%drlks_%dalks.amp' % (i + 1, inps.rlks, inps.alks)
cmd = "{}/look.py -i {} -o {} -r {} -a {}".format(SCR_DIR, amp, amp_looked, inps.rlks, inps.alks)
runCmd(cmd)
# mburst_looked = 'master_%02d_%drlks_%dalks.slc' % (i+1,inps.rlks,inps.alks)
# cmd = "look.py -i {} -o {} -r {} -a {}".format(
# mbursts[i],
# mburst_looked,
# inps.rlks,
# inps.alks)
# runCmd(cmd)
# sburst_looked = 'slave_%02d_%drlks_%dalks.slc' % (i+1,inps.rlks,inps.alks)
# cmd = "look.py -i {} -o {} -r {} -a {}".format(
# sbursts[i],
# sburst_looked,
# inps.rlks,
# inps.alks)
# runCmd(cmd)
lat_looked = 'lat_%02d_%drlks_%dalks.rdr' % (i + 1, inps.rlks, inps.alks)
#lat = os.path.join(inps.gdir, 'lat_%02d.rdr'%(i+1))
cmd = "{}/look.py -i {} -o {} -r {} -a {}".format(SCR_DIR,
lats[slave_burst_id],
lat_looked,
inps.rlks,
inps.alks)
runCmd(cmd)
lon_looked = 'lon_%02d_%drlks_%dalks.rdr' % (i + 1, inps.rlks, inps.alks)
#lon = os.path.join(inps.gdir, 'lon_%02d.rdr'%(i+1))
cmd = "{}/look.py -i {} -o {} -r {} -a {}".format(
SCR_DIR,
lons[slave_burst_id],
lon_looked,
inps.rlks,
inps.alks)
runCmd(cmd)
logr_looked = 'logr_%02d_%drlks_%dalks.float' % (i + 1, inps.rlks, inps.alks)
# cmd = "imageMath.py -e='log10((a_0)/(a_1+(a_1==0)))*(a_0!=0)*(a_1!=0)' --a={} -o {} -t float -s BIP".format(
# amp_looked,
# logr_looked)
# runCmd(cmd)
amp_looked_data = np.fromfile(amp_looked, dtype=np.float32).reshape(length_looked, width_looked * 2)
m = amp_looked_data[:, 0:width_looked * 2:2]
s = amp_looked_data[:, 1:width_looked * 2:2]
# Only for S1-LAR before v2.0! Apre/Aco:
# logr_looked_data = np.log10( (m+(m==0)) / (s+(s==0)) ) * (m!=0) * (s!=0)
# Only for S1-LAR v2.0 onwards! Aco/Apre (-ve value is openwater flood, +ve value is veg-flood)
logr_looked_data = np.log10(old_div((s + (s == 0)), (m + (m == 0)))) * (m != 0) * (s != 0)
#remove white edges
upper_edge = 0
for k in range(length_looked):
if logr_looked_data[k, int(old_div(width_looked, 2))] != 0:
upper_edge = k
break
lower_edge = length_looked - 1
for k in range(length_looked):
if logr_looked_data[length_looked - 1 - k, int(old_div(width_looked, 2))] != 0:
lower_edge = length_looked - 1 - k
break
left_edge = 0
for k in range(width_looked):
if logr_looked_data[int(old_div(length_looked, 2)), k] != 0:
left_edge = k
break
right_edge = width_looked-1
for k in range(width_looked):
if logr_looked_data[int(old_div(length_looked, 2)), width_looked - 1 - k] != 0:
right_edge = width_looked-1-k
break
        print('four edges: lower: {}, upper: {}, left: {}, right: {}'.format(lower_edge, upper_edge, left_edge, right_edge))
        flag = np.zeros((length_looked, width_looked), dtype=float)  # np.float is deprecated; use the builtin float
delta = 3
flag[upper_edge + delta:lower_edge - delta, left_edge + delta:right_edge - delta] = 1.0
logr_looked_data *= flag
logr_looked_data.astype(np.float32).tofile(logr_looked)
create_xml(logr_looked, width_looked, length_looked, 'float')
#width = getWidth(lon_looked + '.xml')
#length = getLength(lon_looked + '.xml')
lat_looked_data = np.fromfile(lat_looked, dtype=np.float64).reshape(length_looked, width_looked)
lon_looked_data = np.fromfile(lon_looked, dtype=np.float64).reshape(length_looked, width_looked)
lat_max = np.amax(lat_looked_data)
lat_min = np.amin(lat_looked_data)
lon_minmax = post_process_lon_max(lon_looked_data)
if len(lon_minmax) == 1:
# normal case
lon_min, lon_max = lon_minmax[0]
bbox = [lat_min, lat_max, lon_min, lon_max]
print(f"lat_min:{lat_min}. lat_max:{lat_max}, lon_min:{lon_min}, lon_max:{lon_max}")
print(f"bbox:{bbox}")
logr_looked_geo = 'logr_%02d_%drlks_%dalks.float.geo' % (i + 1, inps.rlks, inps.alks)
cmd = f"{SCR_DIR}/geo_with_ll.py -input {logr_looked} -output {logr_looked_geo} " \
f"-lat {lat_looked} -lon {lon_looked} -bbox \"{bbox}\" -ssize {inps.ssize} -rmethod 1"
runCmd(cmd)
amp_looked_geo = 'amp_%02d_%drlks_%dalks.amp.geo' % (i + 1, inps.rlks, inps.alks)
cmd = f"{SCR_DIR}/geo_with_ll.py -input {amp_looked} -output {amp_looked_geo} " \
f"-lat {lat_looked} -lon {lon_looked} -bbox \"{bbox}\" -ssize {inps.ssize} -rmethod 1"
runCmd(cmd)
else:
# case where it passes anti-meridian, we geocode twice:
lon_min_ap, lon_max_ap = lon_minmax[0]
lon_min_na, lon_max_na = lon_minmax[1]
bbox_ap = [lat_min, lat_max, lon_min_ap, lon_max_ap]
bbox_na = [lat_min, lat_max, lon_min_na, lon_max_na]
logr_looked_geo_ap = 'logr_%02d_%drlks_%dalks_AP.float.geo' % (i + 1, inps.rlks, inps.alks)
logr_looked_geo_na = 'logr_%02d_%drlks_%dalks_NA.float.geo' % (i + 1, inps.rlks, inps.alks)
cmd = f"{SCR_DIR}/geo_with_ll.py -input {logr_looked} -output {logr_looked_geo_ap} " \
f"-lat {lat_looked} -lon {lon_looked} -bbox \"{bbox_ap}\" -ssize {inps.ssize} -rmethod 1 && " \
f"{SCR_DIR}/geo_with_ll.py -input {logr_looked} -output {logr_looked_geo_na} " \
f"-lat {lat_looked} -lon {lon_looked} -bbox \"{bbox_na}\" -ssize {inps.ssize} -rmethod 1"
runCmd(cmd)
amp_looked_geo_ap = 'amp_%02d_%drlks_%dalks_AP.amp.geo' % (i + 1, inps.rlks, inps.alks)
amp_looked_geo_na = 'amp_%02d_%drlks_%dalks_NA.amp.geo' % (i + 1, inps.rlks, inps.alks)
cmd = f"{SCR_DIR}/geo_with_ll.py -input {amp_looked} -output {amp_looked_geo_ap} " \
f"-lat {lat_looked} -lon {lon_looked} -bbox \"{bbox_ap}\" -ssize {inps.ssize} -rmethod 1 && " \
f"{SCR_DIR}/geo_with_ll.py -input {amp_looked} -output {amp_looked_geo_ap} " \
f"-lat {lat_looked} -lon {lon_looked} -bbox \"{bbox_na}\" -ssize {inps.ssize} -rmethod 1"
runCmd(cmd)
os.remove(amp)
os.remove(amp_looked)
os.remove(lat_looked)
os.remove(lon_looked)
os.remove(logr_looked)
os.remove(amp+'.xml')
os.remove(amp_looked+'.xml')
os.remove(lat_looked+'.xml')
os.remove(lon_looked+'.xml')
os.remove(logr_looked+'.xml')
os.remove(amp+'.vrt')
os.remove(amp_looked+'.vrt')
os.remove(lat_looked+'.vrt')
os.remove(lon_looked+'.vrt')
os.remove(logr_looked+'.vrt')
#log_ratio.py -mdir /u/hm/NC/data/S1-COH_STCM3S3_TN077_20160929T231332-20161011T231433_s1-resorb-v1.0/master -sdir /u/hm/NC/data/S1-COH_STCM3S3_TN077_20160929T231332-20161011T231433_s1-resorb-v1.0/fine_coreg -gdir /u/hm/NC/data/S1-COH_STCM3S3_TN077_20160929T231332-20161011T231433_s1-resorb-v1.0/geom_master -rlks 7 -alks 2
| null |
script/log_ratio.py
|
log_ratio.py
|
py
| 15,313 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.abspath",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "subprocess.check_call",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.ElementTree",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.ElementTree",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "isceobj.createSlcImage",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "isceobj.createIntImage",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "isceobj.createAmpImage",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "isceobj.Image.createUnwImage",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "isceobj.Image",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "isceobj.createImage",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.nanmax",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.nanmin",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.gradient",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "numpy.gradient",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "numpy.fliplr",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "numpy.amin",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "builtins.range",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "numpy.nanmin",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "numpy.nanmax",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "numpy.nanmin",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "numpy.nanmax",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "builtins.range",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "ntpath.basename",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "builtins.range",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "ntpath.basename",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "past.utils.old_div",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "past.utils.old_div",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "numpy.fromfile",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "numpy.complex64",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "numpy.fromfile",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "numpy.complex64",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "numpy.absolute",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "numpy.absolute",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "numpy.fromfile",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "numpy.log10",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "past.utils.old_div",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "builtins.range",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "past.utils.old_div",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "builtins.range",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "past.utils.old_div",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "builtins.range",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "past.utils.old_div",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "builtins.range",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "past.utils.old_div",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 299,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 304,
"usage_type": "attribute"
},
{
"api_name": "numpy.fromfile",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 309,
"usage_type": "attribute"
},
{
"api_name": "numpy.fromfile",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "numpy.amax",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "numpy.amin",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 370,
"usage_type": "call"
}
] |
566187498
|
import glob
import markdown
import json
import pprint
import uuid
# Get dir list of MD files.
mds = sorted(glob.glob("./*.md"), reverse=True)
# Create an empty array for data storing.
data = []
# Iterate over the MD files list and extract yaml data.
for md in mds:
with open(md) as f:
md = markdown.Markdown(extensions = ['full_yaml_metadata'])
s = f.read()
md.convert(s)
if md.Meta:
date = md.Meta['date'].split('/')
md.Meta['md'] = s
md.Meta['id'] = str(uuid.uuid3(uuid.NAMESPACE_DNS, s))
md.Meta['year'] = date[2]
md.Meta['month'] = date[1]
data.append(md.Meta)
# Export data as JSON.
with open('blogList.json', 'w') as f:
json.dump(data, f)
# Pretty print the extracted data.
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(data)
# Output some useful info.
print('='*80)
print('# of files:', len(mds), sep='\t')
print('# of YAMLs:', len(data), sep='\t')
print('='*80)
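# Example of one exported entry (illustrative only; extra keys depend on each post's YAML front matter):
#   {"date": "01/12/2019", "id": "<uuid3 of the markdown body>", "year": "2019", "month": "12", "md": "<raw markdown>", ...}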
| null |
markdowns/getBlogList.py
|
getBlogList.py
|
py
| 985 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "glob.glob",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "markdown.Markdown",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "uuid.uuid3",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "uuid.NAMESPACE_DNS",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pprint.PrettyPrinter",
"line_number": 32,
"usage_type": "call"
}
] |
357317269
|
from flask import Flask , render_template
from wtforms_fields import *
from models import *
app = Flask(__name__)
app.secret_key = 'replace key later'
#configure database
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://ozawkdoyvqhipc:f35523972b309aab0ab3a73978616ee325ed23ae5808eb143c1174a77ff7a56d@ec2-54-197-234-117.compute-1.amazonaws.com:5432/drrtj2v553ai9'
db = SQLAlchemy(app)
@app.route("/",methods=['GET','POST'])
def index():
reg_form = RegistrationForm()
if reg_form.validate_on_submit():
return "registered"
return render_template('index.html', form = reg_form)
@app.route("/about")
def about():
return render_template('about.html')
if __name__ == "__main__":
app.run(debug=True,port=7000)
| null |
application.py
|
application.py
|
py
| 744 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 27,
"usage_type": "call"
}
] |
540010577
|
# Clustering with Hedonic Games
# Detecting communities in networks with cooperative game theory
# A research experiment in collaboration between *UFRJ and ^INRIA
# *Lucas Lopes, *Daniel Sadoc, ^Kostya and ^Giovanni
# December 2019
## Import Dependencies #########################################################
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import plotly.express as px
import networkx as nx
import pandas as pd
import numpy as np
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from hedonic import Game
from players import sequential
################################################################################
## Helper Functions ############################################################
################################################################################
game = Game()
game.replay('KAR_0.95_GRE_r0.5_i13_v52.50_e23.43_abs14.75.pickle')
## Get list of Networks ########################################################
def get_existing_networks():
options = []
networks = Game.show_networks(get=True)
for net in networks:
option = {'label': net, 'value': net[:3].upper()}
options.append(option)
return options
def get_iteration_data():
total_verts = game.infos['verts']
total_edges = game.infos['edges']
acc = np.array(game.hist['accumulated'])
verts_yes = np.array(game.hist['verts_yes'])
edges_yes = np.array(game.hist['edges_yes'])
edges_no = np.array(game.hist['edges_no'])
pot_yes, pot_no = game.global_potential(verts_yes, edges_yes, edges_no, sum=False)
potential_prop = pot_yes / (pot_yes + pot_no)
edges_yes_prop = edges_yes / total_edges
edges_off_prop = ((total_edges - edges_yes - edges_no) / total_edges) + edges_yes_prop
verts_yes_prop = verts_yes / total_verts
iterations, instant = [0], []
for i in range(len(acc)-1):
iterations.append(i+1)
instant.append(acc[i+1]-acc[i])
return {
'iterations': iterations,
'instantaneous' : instant,
'accumulated' : acc,
'potential_prop': potential_prop,
'verts_yes_prop': verts_yes_prop,
'edges_yes_prop': edges_yes_prop,
'edges_off_prop': edges_off_prop }
game_data = get_iteration_data()
## Plot a Graph ################################################################
def plot_graph(G = nx.random_geometric_graph(200, 0.125)): # nx.karate_club_graph()): #
# V=range(N)# list of vertices
# g=nx.Graph()
# g.add_nodes_from(V)
# g.add_edges_from(E)# E is the list of edges
# pos=nx.fruchterman_reingold_layout(g)
edge_x = []
edge_y = []
for edge in G.edges():
        # print(G.nodes[edge[0]])
        x0, y0 = G.nodes[edge[0]]['pos']
        x1, y1 = G.nodes[edge[1]]['pos']
edge_x.append(x0)
edge_x.append(x1)
edge_x.append(None)
edge_y.append(y0)
edge_y.append(y1)
edge_y.append(None)
edge_trace = go.Scatter(
x=edge_x, y=edge_y,
line=dict(width=0.5, color='#888'),
hoverinfo='none',
mode='lines')
node_x = []
node_y = []
for node in G.nodes():
        x, y = G.nodes[node]['pos']
node_x.append(x)
node_y.append(y)
node_trace = go.Scatter(
x=node_x, y=node_y,
mode='markers',
hoverinfo='text',
marker=dict(
showscale=True,
# colorscale options
#'Greys' | 'YlGnBu' | 'Greens' | 'YlOrRd' | 'Bluered' | 'RdBu' |
#'Reds' | 'Blues' | 'Picnic' | 'Rainbow' | 'Portland' | 'Jet' |
#'Hot' | 'Blackbody' | 'Earth' | 'Electric' | 'Viridis' |
colorscale='YlGnBu',
reversescale=True,
color=[],
size=10,
colorbar=dict(
thickness=15,
title='Node Connections',
xanchor='left',
titleside='right'
),
line_width=2))
node_adjacencies = []
node_text = []
for node, adjacencies in enumerate(G.adjacency()):
node_adjacencies.append(len(adjacencies[1]))
node_text.append('# of connections: '+str(len(adjacencies[1])))
node_trace.marker.color = node_adjacencies
node_trace.text = node_text
# Another option would be to size points by the number of connections i.e. node_trace.marker.size = node_adjacencies
fig = go.Figure(data=[edge_trace, node_trace],
layout=go.Layout(
title='<br>Visualize the Network',
titlefont_size=16,
showlegend=False,
hovermode='closest',
margin=dict(b=20,l=5,r=5,t=40),
annotations=[ dict(
text="Verts: V and Edges: E",
showarrow=False,
xref="paper", yref="paper",
x=0.005, y=-0.002 ) ],
xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)))
return fig
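# Example (illustrative only) of previewing an arbitrary graph with the helper above:
#   fig = plot_graph(nx.random_geometric_graph(50, 0.2))
#   fig.show()
# Nodes are coloured by degree and the hover text reports each node's number of connections.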
################################################################################
## Dash Divs ###################################################################
################################################################################
## 1. Header ###################################################################
Header = html.Div(children=[
html.H1('Hedonic Games'),
html.Div([
html.P("Detecting communities in networks with cooperative game theory."),
html.P("A research experiment in colaboration between *UFRJ and ^INRIA."),
html.P("*Lucas Lopes, *Daniel Sadoc, ^Kostya and ^Giovanni."),
html.P("December 2019")]),
html.H2('Run an Experiment')],
style={'textAlign': 'center'})
## 1.1 Run and Experiments #####################################################
RunExperiments = html.Div(style={'columnCount': 2}, children=[
html.Label('Choose a Network:'), # Choose a Network
dcc.Dropdown(
id='network-selection',
options=get_existing_networks(),
multi=False,
value='DAG'),
html.Label('Or upload yours:'), # Upload a Network - TO_DO: update dropdown
dcc.Upload(
id='upload-network',
children=html.Div([ # Add a new experiment by
'Drag and Drop or ',
html.A('Select .CSV')]),
style={
'width': '100%',
'height': '50px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '10px'},
multiple=True), # Allow multiple files to be uploaded
html.Label('Alpha', id='alpha-value'),
dcc.Slider(
id='alpha-slider',
min=0,
max=1,
step=0.05,
value=0.95),
dcc.Tabs(id="tabs-init-mode", value='select-init-mode', children=[
dcc.Tab(
label="Initial mode: 'Select'",
value='select-init-mode',
children=[
html.Label('Select Nodes: ', id='select-nodes-value'),
dcc.Input(
id="nodes-selected",
type='text',
placeholder="['node', 'name', '...']")]),
dcc.Tab(
label="Initial mode: 'Random'",
value='random-init-mode',
children=[
html.Label('Random Classification: ', id='random-value'),
dcc.Slider(
id='init-random-slider',
min=0,
max=1,
step=0.05,
value=0.5)])]),
html.Label('Options:'),
dcc.Checklist(
options=[
{'label': 'Verbose', 'value': 'ver'},
{'label': 'Export', 'value': 'exp'} ],
value=['ver', 'exp']),
html.Button('Run!', id='run-button', style={'width': '100%'}),
html.Textarea( # TODO: only show when running
id='running-message',
style={'width':'100%','margin':'10px'}),
dcc.Graph(
id='network-preview',
figure=go.Figure(plot_graph()))])
## 2. Visualize Experiments ####################################################
## 2.1 Table Results ###########################################################
df = pd.read_csv('experiments/results.csv')
TableResults = html.Div(children=[
html.H2('Visualize Experiments Results', style={'textAlign': 'center'}), # H1?
html.H3('Experiments Results', style={'textAlign': 'center'}),
dash_table.DataTable(
id='experiments-datatable',
columns=[
{"name": i, "id": i, "deletable": False, "selectable": True} for i in df.columns],
data=df.to_dict('records'),
style_cell_conditional=[ {
'if': {'column_id': c},
'textAlign': 'left'
} for c in ['Date', 'Region'] ],
style_header={
'backgroundColor': 'light_blue',
'fontWeight': 'bold' },
filter_action="native",
sort_action="native",
sort_mode="multi",
row_selectable="single",
selected_rows=[0],
style_table={
'overflowX': 'scroll',
'overflowY': 'scroll',
'maxHeight': '450px',
'minWidth': '100%'},
fixed_columns={ 'headers': True, 'data': 1 },
fixed_rows={ 'headers': True, 'data': 0 },
style_cell={
# all three widths are needed
'minWidth': '180px', 'width': '180px', 'maxWidth': '360px',
'overflow': 'hidden', 'padding': '7px',
'textOverflow': 'ellipsis'}),
html.Div(id='datatable-interactivity-container'),
html.Label('Or upload yours:'), # Upload a Network
dcc.Upload(
id='upload-experiment',
children=html.Div([ # Add a new experiment by
'Drag and Drop or ',
html.A('Select .PKL')]),
style={
'width': '100%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '10px'},
multiple=True)])
## 2.2 Instantaneous Gain ######################################################
InstantGain = html.Div(children=[
html.H4('Instantaneous Gain', style={'textAlign': 'center'}),
# dcc.Graph(id="instant-graph")]) # figure=instant
dcc.Graph(figure=go.Figure(data=go.Scatter(x=game_data['iterations'], y=game_data['instantaneous'])))])
## 2.3 Accumulated Gain ########################################################
AccumulatedGain = html.Div(children=[
html.H4('Accumulated Gain', style={'textAlign': 'center'}),
dcc.Graph(figure=go.Figure(data=go.Scatter(x=game_data['iterations'], y=game_data['accumulated'])))])
## 2.4 Potential Proportion ####################################################
PotentialProportion = html.Div(children=[
html.H4('Potential Proportion', style={'textAlign': 'center'}),
dcc.Graph(figure=go.Figure(data=go.Scatter(x=game_data['iterations'], y=game_data['potential_prop'])))])
## 2.5 Vertices Proportion #####################################################
VerticesProportion = html.Div(children=[
html.H4('Vertices Proportion', style={'textAlign': 'center'}),
dcc.Graph(figure=go.Figure(data=go.Scatter(x=game_data['iterations'], y=game_data['verts_yes_prop'])))])
## 2.5 Edges Proportion ########################################################
EdgesProportion = html.Div(children=[
html.H4('Edges Proportion', style={'textAlign': 'center'}),
dcc.Graph(figure=go.Figure(data=[
go.Scatter(x=game_data['iterations'], y=game_data['edges_yes_prop'], name='Edges Yes Proportion'),
go.Scatter(x=game_data['iterations'], y=game_data['edges_off_prop'], name='Edges Off Proportion')]))])
## 2.6 Number of moves #########################################################
################################################################################
## App Server ##################################################################
################################################################################
## App Layout ##################################################################
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
Header,RunExperiments,
TableResults,InstantGain,AccumulatedGain,PotentialProportion,VerticesProportion,EdgesProportion])
#Experiments,TableResults,Networks,Iterations])
################################################################################
## Decorators ##################################################################
################################################################################
@app.callback(
Output(component_id='alpha-value', component_property='children'),
[Input(component_id='alpha-slider', component_property='value')])
def update_alpha_value(alpha_value):
return f'Alpha: {alpha_value}'
@app.callback(
Output(component_id='random-value', component_property='children'),
[Input(component_id='init-random-slider', component_property='value')])
def update_random_mode_value(random_value):
return f'Random Classification: {random_value}'
@app.callback(
Output(component_id='network-preview', component_property='figure'),
[Input(component_id='network-selection', component_property='value')])
def update_network(network):
net = nx.read_edgelist(f'networks/{network}.csv')
return plot_graph(net)
@app.callback(
Output(component_id='running-message', component_property='children'),
[Input(component_id='run-button', component_property='n_clicks')],
[State('network-selection', 'value'), State('alpha-slider', 'value'),
State('nodes-selected', 'value'), State('init-random-slider', 'value')]) # todo: options checkbox
def run_game(n_clicks, network, alpha, init_mode, random_mode):
if n_clicks is None:
raise PreventUpdate
else:
game.load_network(network)
game.set_alpha(alpha)
if init_mode:
game.set_initial_state('s', init_mode)
else:
game.set_initial_state('r', random_mode)
game.play(sequential) # todo: better way to pass player
return "Elephants are the only animal that can't jump"
# todo: importar csv from id=upload-network
# @app.callback(
# Output('datatable-interactivity', 'style_data_conditional'),
# [Input('datatable-interactivity', 'selected_columns')] )
# def update_styles(selected_columns):
# return [{
# 'if': { 'column_id': i },
# 'background_color': '#D2F3FF'
# } for i in selected_columns]
# @app.callback(
# Output('instant-graph', 'figure'),
# [Input('selected-value', 'value'), Input('values-range', 'value')])
# def update_instant_graph(selected, values):
#
# instant = go.Figure()
# for i, y in enumerate(ys):
# instant.add_trace(go.Scatter(
# x=x, y=y, name=f'Karate {i+1}',
# line=dict(color=f'rgb({np.random.randint(0,255)},\
# {np.random.randint(0,255)},{np.random.randint(0,255)})', width=1)))
# x=x+x_rev,
# y=y1_upper+y1_lower,
# fill='toself',
# fillcolor='rgba(0,100,80,0.2)',
# line_color='rgba(255,255,255,0)',
# showlegend=False,
# name='Fair',
# return instant
#
# text = {"Max_TemperatureC": "Maximum Temperature", "Mean_TemperatureC": "Mean Temperature",
# "Min_TemperatureC": "Minimum Temperature"}
# dff = df[(df["values"] >= year[0]) & (df["values"] <= values[1])]
# trace = []
# for type in selected:
# trace.append(go.Scatter(x=dff["Date"], y=dff[type], name=text[type], mode='lines',
# marker={'size': 8, "opacity": 0.6, "line": {'width': 0.5}}, ))
#
# x, y = get_instant_gain()
# trace = []
# trace.append(go.Scatter(
# x=x,
# y=y,
# fill='toself',
# fillcolor='rgba(0,100,80,0.2)',
# line_color='rgba(255,255,255,0)',
# showlegend=False,
# name='Fair',
# mode='lines',
# marker={'size': 8, "opacity": 0.6, "line": {'width': 0.5}} ))
#
# return {"data": trace,
# "layout": go.Layout(title="Instantaneous Gain", colorway=['#fdae61', '#abd9e9', '#2c7bb6'],
# yaxis={"title": "Profit on move"}, xaxis={"title": "Iteration"})}
# @app.callback(
# Output('datatable-interactivity-container', "children"),
# [Input('datatable-interactivity', "derived_virtual_data"),
# Input('datatable-interactivity', "derived_virtual_selected_rows")])
# def update_graphs(rows, derived_virtual_selected_rows):
# When the table is first rendered, `derived_virtual_data` and
# `derived_virtual_selected_rows` will be `None`. This is due to an
# idiosyncrasy in Dash (unsupplied properties are always None and Dash
# calls the dependent callbacks when the component is first rendered).
# So, if `rows` is `None`, then the component was just rendered
# and its value will be the same as the component's dataframe.
# Instead of setting `None` in here, you could also set
# `derived_virtual_data=df.to_rows('dict')` when you initialize
# the component.
#
#
# if derived_virtual_selected_rows is None:
# derived_virtual_selected_rows = []
#
# dff = df if rows is None else pd.DataFrame(rows)
#
# colors = ['#7FDBFF' if i in derived_virtual_selected_rows else '#0074D9'
# for i in range(len(dff))]
# return [
# dcc.Graph(
# id=column,
# figure={
# "data": [ {
# "x": dff["country"],
# "y": dff[column],
# "type": "bar",
# "marker": {"color": colors} } ],
# "layout": {
# "xaxis": {"automargin": True},
# "yaxis": {
# "automargin": True,
# "title": {"text": column} },
# "height": 250,
# "margin": {"t": 10, "l": 10, "r": 10} } } )
# # check if column exists - user may have deleted it
# # If `column.deletable=False`, then you don't
# # need to do this check.
# for column in ["pop", "lifeExp", "gdpPercap"] if column in dff ]
## Run Server ##################################################################
if __name__ == '__main__':
# if 'DYNO' in os.environ:
# app_name = os.environ['DASH_APP_NAME']
# else:
# app_name = 'Hedonic Ploting'
app.run_server(debug=True)
| null |
app.py
|
app.py
|
py
| 19,053 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "hedonic.Game",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "hedonic.Game.show_networks",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "hedonic.Game",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "networkx.random_geometric_graph",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.Scatter",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.Scatter",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.Figure",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.Layout",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "dash_html_components.Div",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "dash_html_components.H1",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "dash_html_components.P",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "dash_html_components.P",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "dash_html_components.P",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "dash_html_components.P",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "dash_html_components.H2",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Label",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Dropdown",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Label",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Upload",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "dash_html_components.A",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Label",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Slider",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Tabs",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Tab",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Label",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Input",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Tab",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Label",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Slider",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Label",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Checklist",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Button",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Textarea",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Graph",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.Figure",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "dash_html_components.H2",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "dash_html_components.H3",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "dash_table.DataTable",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Label",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Upload",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "dash_html_components.A",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "dash_html_components.H4",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Graph",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.Figure",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.Scatter",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "dash_html_components.H4",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Graph",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.Figure",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 299,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.Scatter",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "dash_html_components.H4",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Graph",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.Figure",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 305,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.Scatter",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "dash_html_components.H4",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Graph",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.Figure",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 311,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.Scatter",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "dash_html_components.H4",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Graph",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.Figure",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 317,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.Scatter",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 318,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.Scatter",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 319,
"usage_type": "name"
},
{
"api_name": "dash.Dash",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "networkx.read_edgelist",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "players.sequential",
"line_number": 377,
"usage_type": "argument"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.State",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.State",
"line_number": 366,
"usage_type": "call"
}
] |
45956031
|
import control as ctrl
import numpy as np
from utils import insert
class Controller:
    specs = None
    Gff = None
    Gc = None
    def __init__(self, specs=None):
        self.specs = specs
        self.instantiate()
    def instantiate(self, specs=None):
        if specs is not None:
            self.specs = specs
        # PID parameters with defaults: gain, integral/derivative times, set-point weights and filter factor
        self.Kc = insert(1, self.specs, 'Kc')
        self.Ti = insert(np.inf, self.specs, 'Ti')
        self.Td = insert(0, self.specs, 'Td')
        self.b = insert(1.0, self.specs, 'b')
        self.c = insert(1.0, self.specs, 'c')
        self.N = insert(9.0, self.specs, 'N')
        num_p = self.Kc * self.b
        den_p = 1
        Cpff = ctrl.tf(num_p, den_p)
        Cpc = ctrl.tf(self.Kc, 1)
        if self.Ti != np.inf:
            num_i = self.Kc
            den_i = [self.Ti, 0]
            Ci = ctrl.tf(num_i, den_i)
        else:
            Ci = 0
        if self.Td != 0:
            num_d = [self.Kc * self.Td * self.c, 0]
            den_d = [(self.Td / self.N), 1]
            Cdff = ctrl.tf(num_d, den_d)
            Cdc = ctrl.tf([self.Kc * self.Td], den_d)
        else:
            Cdff = 0
            Cdc = 0
        self.Gff = Cpff + Ci + Cdff
        self.Gc = Cpc + Ci + Cdc
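# Minimal usage sketch (hypothetical tuning values; assumes utils.insert returns specs[key] when present, else the default):
#   c = Controller({'Kc': 2.0, 'Ti': 5.0, 'Td': 0.5})
#   print(c.Gc)   # feedback-path transfer function
#   print(c.Gff)  # set-point (feed-forward) weighted transfer function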
| null |
industrialPID_trab_1/pid_industrial/controller.py
|
controller.py
|
py
| 1,306 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "utils.insert",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "utils.insert",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "utils.insert",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "utils.insert",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "utils.insert",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "utils.insert",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "control.tf",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "control.tf",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "control.tf",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "control.tf",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "control.tf",
"line_number": 44,
"usage_type": "call"
}
] |
482520443
|
from App8.models import ProductModel
from django import forms
class ProductForm(forms.ModelForm):
pno = forms.IntegerField(min_value=1001)
class Meta:
fields = '__all__'
model = ProductModel
def clean_price(self):
price = self.cleaned_data['price']
if price >= 1000:
return price
else:
            raise forms.ValidationError('Price must be 1000 or Above')
def clean_quantity(self):
        qty = self.cleaned_data['quantity']
        if qty > 0:
            return qty
        else:
            raise forms.ValidationError('Quantity must not be 0')
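# Illustrative sketch (hypothetical view code; other ProductModel fields omitted) of how the clean_* hooks above run:
#   form = ProductForm(data={'pno': 1005, 'price': 500, 'quantity': 0, ...})
#   form.is_valid()   # False: clean_price and clean_quantity both raise ValidationError
#   form.errors       # {'price': [...], 'quantity': [...]}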
| null |
App8/forms.py
|
forms.py
|
py
| 622 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.forms.ModelForm",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.forms.IntegerField",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "App8.models.ProductModel",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.forms.ValidationError",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.forms.ValidationError",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 24,
"usage_type": "name"
}
] |
84121631
|
import statistics
import random
import timeit
import multiprocessing
from threading import Thread
class Producer:
def __init__(self,buff,companies):
self.buffer=buff
self.companies = companies
def getRandomQuote(self):
return [self.companies[random.randint(0,len(self.companies)-1)],random.randint(1,100)]
def run(self):
for i in range(1000):
a=self.getRandomQuote()
self.buffer.add(a[0],a[1])
class Buffer:
def __init__(self):
self.quotes=[] # quotes to be stored in list in format [['GOOG',15],['AMAZ',20],...]
def add(self,company,price):
self.quotes.append([company,price])
def remove(self):
res = self.quotes[0]
self.quotes.remove(self.quotes[0])
return res
def isEmpty(self):
return self.quotes == []
class Consumer:
def __init__(self,buff,companies):
self.buffer=buff # initializes a buffer attribute buff
self.quotes={i:[] for i in companies} # quotes to be stored in format {'GOOG':[1, 2, 3, 4], 'AMAZ': [5, 6, 7, 8],...}
def run(self):
i=1
while i!=1000:
if not self.buffer.isEmpty():
a=self.buffer.remove()
self.quotes[a[0]].append(a[1])
i+=1
for key in self.quotes:
print('The highest value for ' + key + ' is ' + str(max(self.quotes[key])))
print('The lowest value for ' + key + ' is ' + str(min(self.quotes[key])))
print('The average value for ' + key + ' is ' + str(int(statistics.mean(self.quotes[key])))+'\n')
if __name__ == "__main__":
T1= timeit.default_timer()
b= Buffer()
comp = ['AAPL','AMZN','GOOG','FB','CSCO','CMCSA','AMGN','ADBE','GILD','COST']
p = Producer(b,comp)
c = Consumer(b,comp)
t1= Thread(target=p.run)
t2 = Thread(target=c.run)
t1.start()
t2.start()
t1.join()
t2.join()
T2=timeit.default_timer()
print("Total elapsed time is " + str(T2-T1))
| null |
ProducerConsumer.py
|
ProducerConsumer.py
|
py
| 1,787 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "random.randint",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "statistics.mean",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 68,
"usage_type": "call"
}
] |
533969280
|
from .utils import ConfigureError, merge_clips
class TrimFrames:
def __init__(self, frames):
if len(frames) == 0:
raise ConfigureError('TrimFrames: frames length is 0')
self.frames = frames
def __call__(self, core, clip):
clips = [
core.std.Trim(clip, first=first, last=last)
for first, last in self.frames
]
return merge_clips(clips)
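# Illustrative configuration sketch (hypothetical frame ranges):
#   trim = TrimFrames(frames=[(0, 239), (480, 719)])
#   clip = trim(core, clip)  # keeps frames 0-239 and 480-719 and splices them back together
# An empty frames list raises ConfigureError at construction time.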
| null |
conf/filters/trim_frames.py
|
trim_frames.py
|
py
| 422 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "utils.ConfigureError",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "utils.merge_clips",
"line_number": 15,
"usage_type": "call"
}
] |
521034346
|
from django.contrib import admin
from django.urls import path
from .views import *
from rest_framework.urlpatterns import format_suffix_patterns
urlpatterns = [
# path('article/', article_list),
path('services/', ServicesAPIView.as_view()),
path('service/<int:id>/', ServicesDetailsView.as_view()),
path('profiles/', ProfileAPIView.as_view()),
path('profile/<int:id>/', ProfileDetailsView.as_view()),
path('elders/',ElderListView.as_view()),
path('elders/<int:id>/',ElderDetailView.as_view()),
path('volunteers/<int:id>/',GetVolunteers.as_view()),
path('feedback/',FeedbackSubmitAPIView.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| null |
SeniorSidekick/backend/Sunshine/api/urls.py
|
urls.py
|
py
| 687 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "rest_framework.urlpatterns.format_suffix_patterns",
"line_number": 17,
"usage_type": "call"
}
] |
184625493
|
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""system start python file.
all start from Server class
work_func you can do implement your work
CollectorProcessTask is collect task implement
MQConsumerTask receive messages decode and run collect server
CollectorAnalyseTask is analyse task implement
MyHeartBeatTask is my new heart beat task implement
ManagerImp is my new http interface implement
"""
import copy
import datetime
import json
import platform
import time
import uuid
import os
import re
import configparser
import ast
import psutil
from logstash import TCPLogstashHandler
from oslo_log import log
from data_transmission.application.helper import json_loads, json_dumps
from data_transmission.sdk.mq import MQClient
from data_transmission.sdk.server_manager import Manager, Server, Task
from data_transmission.sdk.server_manager import HeartBeatTask
from data_transmission.sdk.common.commfunc import get_id, get_macs_ips
from data_transmission.sdk.update_helper.update_self import UpdateSelf
from data_transmission.sdk.server_manager import MQProcess, ProcessManage, KafkaTask
from data_transmission.application.helper.kafka_helper import KafkaHelper
from data_transmission.application.helper.elasticsearch_operator import ElasticsearchHelper
from data_transmission.application.helper.file_operator import FileOperator
from data_transmission.application.helper.jpype_helper import JpypeHelper
from data_transmission.application.helper.check_except import CheckExcept
from data_transmission.application.helper.download_helper import DownloadPackage
from data_transmission.application.helper.mysql_helper import current_level_code, es_history_time
from data_transmission.application.helper.mysql_helper import MysqlHelper
_MQ_CONSUMER = None
CONFIG = None
HOST_IP = None
HOST_PORT = None
LOG = log.getLogger('log')
LOG_WARNING = log.getLogger()
_SERVER_START_TIME = int(time.time())
SERVER_STATE = True
SERVER_UUID = str(uuid.uuid4())
LOG_TO_LOGSTASH_PORT = 8156
filepath = os.path.abspath(__file__)
filepath_split = filepath.split("/")
_SERVER_NAME = filepath_split[-4]
PACKAGE_PATH = None
ENV_PATH = None
ZOOKEEPER = None
_ENV_ARGS = None
FILEOPERATOR = FileOperator()
JPYPEHELPER = None
MYSQLHELPER = None
# define the global queue
class ManagerImp(Manager):
""" manage server task processing controlled by http.
task manage server,provide a group of interface that start stop and restart server's tasks.
"""
def __init__(self):
LOG.info("ManagerImp start")
super(ManagerImp, self).__init__()
self.env_args = _ENV_ARGS
def start(self, body):
""" start all task.
start server's tasks
:return: 1 成功
-1 传入格式错误
"""
return "imp start"
def stop(self, body):
"""stop all task.
stop server's tasks.
:return: 1 成功
-1 传入格式错误
"""
return "imp stop"
def restart(self):
"""restart all task.
restart server's tasks.
:return: imp restart
:rtype: str
"""
# self.stop()
#
# self.start()
return 'imp restart'
@staticmethod
def process_start(body):
""" start all task.
start server's tasks
:return: 1 成功
-1 传入格式错误
"""
global _MQ_CONSUMER
global CONFIG
result = dict()
if "process_num" in body:
process_num = body["process_num"]
if isinstance(process_num, str):
process_num = int(process_num)
for count in range(process_num):
try:
_MQ_CONSUMER = MQConsumerTask(str(uuid.uuid4()), CONFIG)
ProcessManage().add_task(_MQ_CONSUMER)
ProcessManage().start(_MQ_CONSUMER.name())
except Exception as e:
result["message"] = -1
result["error"] = "失败原因: %s,开启成功数:%d" % (e, count)
return result
result["message"] = 1
result["error"] = ""
else:
result["message"] = -2
result["error"] = "失败原因: 传入格式错误"
return result
@staticmethod
def process_stop(body):
"""stop all task.
stop server's tasks.
:return: 1 成功
-1 传入格式错误
"""
result = dict()
temp_pid = list()
if "pid" in body and body["pid"] is not None:
try:
if not isinstance(body["pid"], list):
temp_pid.append(int(body["pid"]))
ProcessManage().stop(temp_pid)
else:
ProcessManage().stop(body["pid"])
result["message"] = 1
result["error"] = ""
return result
except Exception as e:
result["message"] = -1
result["error"] = e
else:
result["message"] = -2
result["error"] = "传入格式错误"
return result
def update(self, request):
"""
/manage/update
:param request:
:return:
"""
global _ENV_ARGS
self.env_args = _ENV_ARGS
response = dict()
try:
msg = json.loads(request.body.decode('utf-8'))
except:
response['result'] = 0
response['msg'] = u'请求消息格式异常,无法处理'
return response
global CONFIG
logstash_host = CONFIG['logstash_info']['host']
handlers = list()
for logstash_item in str(logstash_host).split(','):
(host, port) = str(logstash_item).split(':')
logstash_handler = TCPLogstashHandler(host, int(port),
version=1, message_type='logstash')
handlers.append(logstash_handler)
result = UpdateSelf(env_args=self.env_args, update_args=msg, handlers=handlers).update()
if result:
response['result'] = 0
response['msg'] = result
return response
else:
response['result'] = 1
response['msg'] = '升级进程已启动'
return response
class MyHeartBeatTask(HeartBeatTask):
""" heartbeat report server.
Report status information to monitor server.
Attributes:
start_time: task started time used for recording operating time
"""
def __init__(self, config):
"""init heartbeat task.
upload server state information to monitor server.
:param config: server connection info
"""
LOG.info("myheartbeat start")
super(MyHeartBeatTask, self).__init__(config)
self.start_time = _SERVER_START_TIME
self.config = config
self._version = self.get_version()
self.node_path = ""
def do(self, *args, **kwargs):
""" inherit handler.
make up a dict structure data,and put it to rabbit-mq no-callback queue.
"""
global _SERVER_NAME
msg = dict()
msg['server_uuid'] = get_id("data_transmission")
msg['server_type'] = "data_transmission"
msg['server_version'] = self._version
msg['server_state'] = 1
msg['server_time'] = int(time.time()) - self.start_time
msg['server_host_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
pid_dict = self.get_process_status()
msg['server_pid'] = pid_dict
server_host_info = dict()
server_host_info['cpu'] = psutil.cpu_percent(0)
server_host_info['memory'] = psutil.virtual_memory().percent
host_ips = list()
mac_ip_info = get_macs_ips()
for each in mac_ip_info:
host_ips.append(mac_ip_info[each][-1])
server_host_info['host_ip'] = host_ips
msg['server_host_info'] = server_host_info
global HOST_PORT
global HOST_IP
http_info = dict()
http_info["server_ip"] = HOST_IP
http_info["server_port"] = HOST_PORT
msg['http_info'] = http_info
database_info = dict()
if 'db' in self._config:
database_info['db_server_ip'] = self._config['db']['host']
database_info['db_server_port'] = self._config['db']['port']
database_info['db_user'] = self._config['db']['user']
database_info['db_pwd'] = self._config['db']['password']
msg['database_info'] = database_info
cache_info = dict()
if 'memcache' in self.config:
cache_info['cache_server_ip'] = self._config['memcache']['host']
cache_info['cache_server_port'] = self._config['memcache']['port']
msg['cache_info'] = cache_info
MQClient.send_message(
self.config["mq_url"],
exchange_name="",
routing_key="server_state",
message=json.dumps(msg))
@staticmethod
def get_version():
location = os.path.realpath(__file__)
        if len(re.findall(r"-(\d\.\d\.\d?)-", location)) > 0:
            result = re.findall(r"-(\d\.\d\.\d?)-", location)[0]
return result
else:
LOG.info("匹配失败没有找到版本号")
return None
@staticmethod
def get_process_status():
pid_status = list()
killed_pid = list()
process_dict = ProcessManage().get_task()
for pid in process_dict.keys():
temp_dict = dict()
if type(pid) == int:
try:
process = psutil.Process(pid)
if process.status().lower() == "zombie":
killed_pid.append(pid)
else:
temp_dict["pid"] = pid
temp_dict["status"] = process.status().lower()
pid_status.append(temp_dict)
except:
killed_pid.append(pid)
else:
continue
if killed_pid:
ProcessManage().stop(killed_pid)
for item in killed_pid:
_, _ = os.waitpid(int(item), os.WNOHANG)
return pid_status
class AlarmDataFromKafka(KafkaTask):
"""
    Sender side: fetch alarm data from Kafka and forward it.
"""
def __init__(self, name, config):
self.db_url = config.get("db_url")
self.kafka_config = config.get("kafka")
self.kafka_host = self.kafka_config.get("host", "127.0.0.1")
self.kafka_port = self.kafka_config.get("port", 9092)
self.host = "{}:{}".format(self.kafka_host, self.kafka_port)
        # Kafka topic for alarm data
        self.topic = config.get("alarm_topic", "alarm-data")
        self.group_id = "AlarmDataFromKafkaGroupId"
        super(AlarmDataFromKafka, self).__init__(name, hosts=self.host, topic=self.topic, group_id=self.group_id)
        self.queue_name = config.get("send_queue", "data_transmission_send")
        # RabbitMQ URL
        self.mq_url = config.get("mq_url")
        # MQ info of the upper-level node that data is reported to
self.high_level = config.get("high_level")
self.current_node_level, self.level = current_level_code(self.db_url)
self.source_queue = config.get("source_queue", "data_transmission_source")
def do(self, message):
# if self.level == 1 or self.level == "1":
# return
try:
task_id = str(uuid.uuid4())
alarm_data = message.value
if isinstance(alarm_data, bytes):
alarm_data = alarm_data.decode('utf-8')
alarm_data = json_loads(alarm_data)
data = {"msgdata": alarm_data, "type": "alarm", "task_id": task_id, "queue_name": self.queue_name,
"receivers": self.high_level,
"mq_url": self.mq_url, "source_queue": self.source_queue}
print("--------------- send alarm-data:{} --------------------".format(data))
JPYPEHELPER.send("alarm", data)
except Exception as ex:
LOG_WARNING.error(u"发送端获取kafka中的告警数据时发生错误,错误原因为:{}".format(str(ex)))
class SendFileData(Task):
"""
    Query ES aggregation and statistics data every 10 minutes and write the results out as files.
"""
def __init__(self, name, config):
interval = config.get("time_of_es_query", "10m")
interval = self.calculate_time(interval)
super(SendFileData, self).__init__(name, interval=interval)
self.mq_url = config.get("mq_url")
self.db_url = config.get("db_url")
self.elastic_query_obj = ElasticsearchHelper(config.get("elastic"), config.get("db_url"))
        # path where the sender stores the generated files
        self.send_file_path = config.get("save_send_data", "/tmp/send_file")
        # RabbitMQ queue name that file data is sent to
        self.queue_name = config.get("send_queue", "data_transmission_send")
        # number of ES records per file
        self.each_file_number_of_es = config.get("number_of_es", "1000")
        # MQ info of the upper-level node that data is reported to
        self.high_level = config.get("high_level")
self.current_node_level, self.level = current_level_code(self.db_url)
self.es_history_time = es_history_time(self.db_url)
self.source_queue = config.get("source_queue", "data_transmission_source")
def do(self):
# if self.level == 1:
# return
try:
            # use the current time as the file-name prefix
now_time_string = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
file_name = now_time_string
prefix = str(uuid.uuid4())
file_path = os.path.join(self.send_file_path, prefix)
            # save the ES data as local files
file_names = self.elastic_query_obj.save_data_to_file(file_name, file_path, self.es_history_time,
limit_size=self.each_file_number_of_es)
list_file_names_str = ",".join(file_names)
file_number = len(file_names)
print("------------------ send file number {} ------------------------".format(file_number))
for file_name in file_names:
target_id = str(uuid.uuid4())
file_content = FILEOPERATOR.read_file(file_name, file_path)
                # assemble the message and send it to RabbitMQ
data = {"file_name": list_file_names_str, "type": "file", "file_number": file_number,
"file_path": file_path, "task_id": target_id, "receivers": self.high_level,
"db_url": self.db_url, "current_file_name": file_name, "queue_name": self.queue_name,
"mq_url": self.mq_url, "source_queue": self.source_queue}
data.setdefault("transmission_data", file_content)
JPYPEHELPER.send("file", data)
except Exception as ex:
LOG_WARNING.error(u"发送端向rabbitmq发送文件时发生错误,错误原因为:{}".format(str(ex)))
@staticmethod
def calculate_time(time_of_query):
"""
        Calculate the interval for querying ES.
        :param time_of_query: the user-configured interval.
        The accepted format is: xxx s(S), xxx m(M), xxx h(H), xxx d(D)
:return:
"""
time_string = time_of_query[: len(time_of_query) - 1]
try:
time_int = int(time_string)
except:
LOG_WARNING.error(u"配置的ES查询时间格式错误,配置的时间为:{}".format(time_of_query))
raise ValueError
if "s" in time_of_query or "S" in time_of_query:
result = time_int
elif "m" in time_of_query or "M" in time_of_query:
result = time_int * 60
elif "h" in time_of_query or "H" in time_of_query:
result = time_int * 60 * 60
elif "d" in time_of_query or "D" in time_of_query:
result = time_int * 60 * 60 * 24
else:
LOG_WARNING.error(u"配置的ES查询时间格式错误,配置的时间为:{}".format(time_of_query))
raise ValueError
return result
class SendVersionData(MQProcess):
"""
    Send upgrade information from RabbitMQ to the lower-level nodes, together with the upgrade package. (sender side)
"""
def __init__(self, name, config):
url = config['mq_url'] if 'mq_url' in config else ''
routing_key = config.get("update_queue", "update_server")
queue_name = "transmission_update"
self.mq_url = config.get("mq_url")
        # queue name used when sending upgrade packages
self.queue_name = config.get("send_queue", "data_transmission_send")
self.package_save_path = config.get("send_update_package", "/tmp/send_package")
        # size of a single file chunk
self.single_file_size = config.get("single_file_size", "1M")
self.single_file_size_bytes = FILEOPERATOR.calculate_digits(self.single_file_size)
super(SendVersionData, self).__init__(name, url, "", queue_name, routing_key=routing_key)
self.download_obj = DownloadPackage()
        # MQ info of the lower-level target nodes
self.low_level = config.get("low_level")
self.source_queue = config.get("source_queue", "data_transmission_source")
def do(self, body):
"""
        Receive upgrade information from RabbitMQ.
        The upgrade message format is:
{"type": "update", "data": [{"server_name": server_name, "version": new_version, "path": path}, ...]}
:param body: rabbitmq messages.
:return:
"""
try:
if isinstance(body, bytes):
body = body.decode('utf-8')
json_data = json.loads(body)
task_id = str(uuid.uuid4())
update_records = list()
print("--------------- 收到的升级信息:{}".format(json_data))
            # Wait 2 seconds here for the Java side to finish its database updates, otherwise the data state (forced vs. optional update) could be read inconsistently.
time.sleep(2)
            # forward the upgrade information to the lower-level nodes
for each in json_data:
server_name = each.get("server_name")
version = each.get("version")
records = MYSQLHELPER.query_update(server_name, version)
update_records += records
if update_records:
data = {"msgdata": update_records, "type": "update", "task_id": task_id, "queue_name": self.queue_name,
"receivers": self.low_level, "case": json_data,
"mq_url": self.mq_url, "source_queue": self.source_queue}
# format_data = {"typeof": "update", "kwargs": data, "is_resend": False}
JPYPEHELPER.send("update", data)
except Exception as ex:
LOG_WARNING.error(u"发送端发送版本信息时发生错误,错误原因为:{}".format(str(ex)))
class MQConsumerTask(MQProcess):
"""MQ Consumer class.
    Fetch data from the RabbitMQ alarm queue and publish it to the Kafka topic "alarm-data".
Attributes:
name: task name unique
config: rabbit-mq connection info
"""
def __init__(self, name, config):
""" init func.
inherit MQTak and overwrite a new callback function for the queue consumer.
"""
url = config['mq_url'] if 'mq_url' in config else ''
routing_key = config.get('receive_queue', "data_transmission_receive")
queue_name = config.get("receive_queue", "data_transmission_receive")
# queue_name = "data_transmission_send"
super(MQConsumerTask, self).__init__(name, url, "", queue_name, routing_key)
self.kafka_config = config.get("kafka")
self.kafka_host = self.kafka_config.get("host", "127.0.0.1")
self.kafka_port = self.kafka_config.get("port", 9092)
self.kafka_host = "{}:{}".format(self.kafka_host, self.kafka_port)
self.alarm_topic = config.get("alarm_topic", "alarm-data")
        # path where the receiver stores data files
        self.receive_file_path = config.get("save_receive_data", "/tmp/receive_file")
        # path where the receiver stores upgrade packages
        self.receive_package_path = config.get("receive_update_package", "/tmp/receive_package")
        # receiver-side ES configuration
        self.elastic_query_obj = ElasticsearchHelper(config.get("elastic"), config.get("db_url"))
        # sender-side ES configuration
        self.dst_es = config.get("dst_es")
        # temporary bookkeeping for split (chunked) files
        self.split_file_number = dict()
self.kafka_obj = KafkaHelper(config.get("db_url"))
self.mq_url = config.get("mq_url")
def do(self, body):
""" the main handle.
        Receive alarm data and file data from RabbitMQ and forward them to the local Kafka.
:param body: mq message
"""
try:
if isinstance(body, bytes):
body = body.decode('utf-8')
json_data = json.loads(body)
extra_data = json_data.get("data")
data_type = extra_data.get("type")
if data_type == "alarm":
print("------------ receive alarm data -----------------------")
task_id = json_data.get("task_id")
data = json_data.get("msgdata")
self.kafka_obj.send_to_kafka(task_id=task_id, data=data, topic=self.alarm_topic, hosts=self.kafka_host,
type_of="alarm", source="receive")
            # save file data
elif data_type == "file":
print("------------ receive file data -----------------------")
es_data = extra_data.get("transmission_data")
temp_data = copy.deepcopy(json_data)
temp_data.get("data").pop("transmission_data")
file_name = extra_data.get("current_file_name")
task_id = json_data.get("task_id")
split_file_name = file_name.split("_")
file_path = os.path.join(self.receive_file_path, split_file_name[0])
send_params = dict()
send_params.setdefault("file_path", file_path)
send_params.setdefault("current_file_name", file_name)
                # save the file data
es_data = json_loads(es_data)
FILEOPERATOR.save_file(es_data, file_name, file_path)
                # index the file contents into ES
self.elastic_query_obj.insert_into_es(task_id, send_params=send_params, typeof="file",
data=es_data, es_handler=self.dst_es, source="receive")
            # save the upgrade package
elif data_type == "package":
print("------------ receive package data -----------------------")
file_content = json_data.get("data")
file_name = json_data.get("file_name")
file_number = json_data.get("file_number")
FILEOPERATOR.save_file_bytes(file_content, file_name, self.receive_package_path)
if "file_number" in json_data:
now_number = self.split_file_number.get(file_name, 0)
self.split_file_number.setdefault(file_name, now_number + 1)
if file_number == now_number + 1:
LOG_WARNING.info(u"升级包传输完成")
                        # clear the cache
self.split_file_number.pop(file_name)
else:
LOG_WARNING.info(u"升级包传输完成")
elif data_type == "update":
task_id = json_data.get("task_id")
update_records = json_data.get("msgdata", None)
if update_records:
MYSQLHELPER.insert_update(task_id, "update", update_records, "receive")
                    # send the upgrade policy to monitor_server
try:
case_update = extra_data.get("case")
for update_record in update_records:
soft_state = update_record.get("soft_state")
if int(soft_state) == 20:
server_name = update_record.get("server_name")
version = update_record.get("version")
for each in case_update:
server_name_case = each.get("server_name")
version_case = each.get("version")
if server_name == server_name_case and version == version_case:
temp_update = [each]
if not isinstance(temp_update, str):
temp_update = json_dumps(temp_update)
MQClient.send_message(url=self.mq_url, exchange_name="", routing_key="update_server", message=temp_update)
except Exception as ex:
LOG_WARNING.error(u"mq消息发送失败,原因为:{}".format(str(ex)))
else:
LOG_WARNING.error(u"接收rabbitmq消息时,数据的格式不是预期要处理的格式,丢弃该数据。")
except Exception as ex:
LOG_WARNING.error(u"在处理rabbitmq消息时发生错误,错误原因:{}".format(str(ex)))
class ReceiveNotice(MQProcess):
def __init__(self, name, config):
mq_url = config.get("mq_url")
queue_name = config.get("notice_queue", "notice_queue")
routing_key = queue_name
super(ReceiveNotice, self).__init__(name, mq_url, "", queue_name, routing_key)
self.check_except = CheckExcept(config)
def do(self, body):
if isinstance(body, bytes):
body = body.decode('utf-8')
json_data = json_loads(body)
if not json_data:
LOG.info(u"反馈的数据格式不正确,不处理。")
return
print(json_data)
if not isinstance(json_data, list):
json_data = [json_data]
for each in json_data:
if isinstance(each, dict):
task_id = each.get("task_id")
state = each.get("state")
self.check_except.notice_message(task_id, state)
def get_env(config):
package_path = None
env_path = None
operate_system = platform.system().lower()
if operate_system == "linux":
env_args = config.get("linux_os")
else:
env_args = config.get("windows_os")
if env_args:
package_path = env_args.get("package_path")
env_path = env_args.get("env_path")
zookeeper = config.get("zk")
return package_path, env_path, zookeeper
def remove_log_file():
global _ENV_ARGS
logging_name = {
"error_name": "",
"info_name": "",
"access_name": ""
}
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__),
os.pardir,
os.pardir
))
cp = configparser.ConfigParser()
file_path = os.path.join(possible_topdir, 'etc', 'logging.conf')
cp.read(file_path)
cp.sections()
logging_name["error_name"] = ast.literal_eval(cp._sections["handler_file"]['args'])[0]
logging_name["info_name"] = ast.literal_eval(cp._sections["handler_log_file"]['args'])[0]
logging_name["access_name"] = ast.literal_eval(cp._sections["handler_access_file"]['args'])[0]
env_path = _ENV_ARGS[1]
for value in logging_name.values():
if os.path.exists(env_path + value):
os.remove(env_path + value)
LOG.info("{} has removed".format(value))
else:
LOG.info("{} file is not exist".format(value))
def work_func(config):
""" the main server function.
main task added function, for this server,rabbit-mq receiver and handler,
inner processing queue monitor, send message to logstash.
:param config: database connection info ,rabbit-mq connection info,
logstash connection info is included
"""
LOG.info('start work func')
config['monitor_exchange'] = 'data_transmission'
global CONFIG, JPYPEHELPER, MYSQLHELPER
CONFIG = config
db_url = config.get("db_url")
JPYPEHELPER = JpypeHelper(db_url)
MYSQLHELPER = MysqlHelper(db_url)
global _MQ_CONSUMER
    # initialize the Java feedback-notification channel
    mq = config.get("mq")
    notice_queue_name = config.get("notice_queue", "notice_queue")
    JPYPEHELPER.init_notice(mq, notice_queue_name)
    # initialize the Java send interface
    source_queue = config.get("source_queue", "data_transmission_source")
    JPYPEHELPER.init_send(mq, queue_name=source_queue)
    # initialize the Java receive interface
    java_receive_queue = config.get("send_queue", "data_transmission_send")
    java_send_queue = config.get("receive_queue", "data_transmission_receive")
    JPYPEHELPER.receive_notice(mq, java_receive_queue, java_send_queue)
    # first check whether earlier failures left data that must be re-sent
    check_obj = CheckExcept(config)
    check_obj.check()
    # consume alarm data from the local Kafka (sender side)
    _send_alarm = AlarmDataFromKafka("alarm_data_consumer", config)
    # dump local ES aggregation/statistics data to files and push them to the RabbitMQ queue (sender side)
    _send_file = SendFileData("file_data_save", config)
    # receive upgrade info from RabbitMQ, download the upgrade package and push both to lower-level nodes (sender side)
    _send_version = SendVersionData("send_version_data", config)
    # receive locally delivered data from RabbitMQ (receiver side)
    _receive_mq_message = MQConsumerTask("receive_mq_message", config)
    # receive feedback notifications (sender side)
    _receive_notice = ReceiveNotice("receive_notice", config)
ProcessManage().add_task(_send_alarm)
ProcessManage().add_task(_send_file)
ProcessManage().add_task(_send_version)
ProcessManage().add_task(_receive_mq_message)
ProcessManage().add_task(_receive_notice)
ProcessManage().start(_send_alarm.name())
ProcessManage().start(_send_file.name())
ProcessManage().start(_send_version.name())
ProcessManage().start(_receive_mq_message.name())
ProcessManage().start(_receive_notice.name())
def main_func(config):
"""before main function do something.
add main sever handle, add tasks.
:param config: transport config parameters ,database connection info ,
rabbit-mq connection info, logstash connection info included
"""
# do framework things
global _ENV_ARGS
_ENV_ARGS = get_env(config)
LOG.info('manage start ok')
LOG.info('config -------- %s' % config)
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__),
os.pardir,
os.pardir))
deploy_file = open(os.path.join(possible_topdir, 'etc', 'deploy.conf'))
deploy_dict = json.load(deploy_file)
global HOST_IP
global HOST_PORT
HOST_IP = deploy_dict["manage_rest"]["host"]
HOST_PORT = deploy_dict["manage_rest"]["port"]
# do work things
work_func(config)
def run():
# run_applications()
Server(main_func, heartbeat=MyHeartBeatTask, manager=ManagerImp)
LOG.info('start work ok -----')
if __name__ == '__main__':
# run_applications()
Server(main_func, heartbeat=MyHeartBeatTask, manager=ManagerImp)
LOG.info('start work ok -----')
| null |
data_transmission/data_transmission/application/start.py
|
start.py
|
py
| 32,578 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "oslo_log.log.getLogger",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "oslo_log.log",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "oslo_log.log.getLogger",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "oslo_log.log",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "data_transmission.application.helper.file_operator.FileOperator",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.Manager",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.ProcessManage",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.ProcessManage",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.ProcessManage",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.ProcessManage",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "logstash.TCPLogstashHandler",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.update_helper.update_self.UpdateSelf",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.HeartBeatTask",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "data_transmission.sdk.common.commfunc.get_id",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "psutil.cpu_percent",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "psutil.virtual_memory",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.common.commfunc.get_macs_ips",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.mq.MQClient.send_message",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.mq.MQClient",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 304,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.ProcessManage",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "psutil.Process",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.ProcessManage",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "os.waitpid",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "os.WNOHANG",
"line_number": 337,
"usage_type": "attribute"
},
{
"api_name": "data_transmission.sdk.server_manager.KafkaTask",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "data_transmission.application.helper.mysql_helper.current_level_code",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "data_transmission.application.helper.json_loads",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.Task",
"line_number": 382,
"usage_type": "name"
},
{
"api_name": "data_transmission.application.helper.elasticsearch_operator.ElasticsearchHelper",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "data_transmission.application.helper.mysql_helper.current_level_code",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "data_transmission.application.helper.mysql_helper.es_history_time",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 410,
"usage_type": "attribute"
},
{
"api_name": "uuid.uuid4",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 413,
"usage_type": "attribute"
},
{
"api_name": "uuid.uuid4",
"line_number": 421,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.MQProcess",
"line_number": 461,
"usage_type": "name"
},
{
"api_name": "data_transmission.application.helper.download_helper.DownloadPackage",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 493,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.MQProcess",
"line_number": 515,
"usage_type": "name"
},
{
"api_name": "data_transmission.application.helper.elasticsearch_operator.ElasticsearchHelper",
"line_number": 546,
"usage_type": "call"
},
{
"api_name": "data_transmission.application.helper.kafka_helper.KafkaHelper",
"line_number": 551,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 564,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 577,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 582,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 582,
"usage_type": "attribute"
},
{
"api_name": "data_transmission.application.helper.json_loads",
"line_number": 587,
"usage_type": "call"
},
{
"api_name": "data_transmission.application.helper.json_dumps",
"line_number": 627,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.mq.MQClient.send_message",
"line_number": 628,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.mq.MQClient",
"line_number": 628,
"usage_type": "name"
},
{
"api_name": "data_transmission.sdk.server_manager.MQProcess",
"line_number": 637,
"usage_type": "name"
},
{
"api_name": "data_transmission.application.helper.check_except.CheckExcept",
"line_number": 643,
"usage_type": "call"
},
{
"api_name": "data_transmission.application.helper.json_loads",
"line_number": 648,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 665,
"usage_type": "call"
},
{
"api_name": "os.path.normpath",
"line_number": 684,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 684,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 684,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 684,
"usage_type": "call"
},
{
"api_name": "os.pardir",
"line_number": 685,
"usage_type": "attribute"
},
{
"api_name": "os.pardir",
"line_number": 686,
"usage_type": "attribute"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 688,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 689,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 689,
"usage_type": "attribute"
},
{
"api_name": "ast.literal_eval",
"line_number": 693,
"usage_type": "call"
},
{
"api_name": "ast.literal_eval",
"line_number": 695,
"usage_type": "call"
},
{
"api_name": "ast.literal_eval",
"line_number": 696,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 699,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 699,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 700,
"usage_type": "call"
},
{
"api_name": "data_transmission.application.helper.jpype_helper.JpypeHelper",
"line_number": 722,
"usage_type": "call"
},
{
"api_name": "data_transmission.application.helper.mysql_helper.MysqlHelper",
"line_number": 723,
"usage_type": "call"
},
{
"api_name": "data_transmission.application.helper.check_except.CheckExcept",
"line_number": 742,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.ProcessManage",
"line_number": 756,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.ProcessManage",
"line_number": 757,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.ProcessManage",
"line_number": 758,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.ProcessManage",
"line_number": 759,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.ProcessManage",
"line_number": 760,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.ProcessManage",
"line_number": 762,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.ProcessManage",
"line_number": 763,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.ProcessManage",
"line_number": 764,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.ProcessManage",
"line_number": 765,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.ProcessManage",
"line_number": 766,
"usage_type": "call"
},
{
"api_name": "os.path.normpath",
"line_number": 783,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 783,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 783,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 783,
"usage_type": "call"
},
{
"api_name": "os.pardir",
"line_number": 784,
"usage_type": "attribute"
},
{
"api_name": "os.pardir",
"line_number": 785,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 787,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 787,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 788,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.Server",
"line_number": 799,
"usage_type": "call"
},
{
"api_name": "data_transmission.sdk.server_manager.Server",
"line_number": 805,
"usage_type": "call"
}
] |
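One detail of start.py worth isolating is the interval parsing in SendFileData.calculate_time: a trailing s/m/h/d (case-insensitive) picks the unit and the leading digits the amount, so "10m" becomes 600 seconds. A standalone re-implementation sketch of that rule, for illustration only:

def parse_interval(spec):
    # "10m" -> 600, "2H" -> 7200, "1d" -> 86400; anything else raises ValueError
    units = {"s": 1, "m": 60, "h": 3600, "d": 86400}
    unit = spec[-1].lower()
    if unit not in units or not spec[:-1].strip().isdigit():
        raise ValueError("unsupported interval: %r" % spec)
    return int(spec[:-1]) * units[unit]

assert parse_interval("10m") == 600
assert parse_interval("2H") == 7200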
180640335
|
from django.shortcuts import render, redirect
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
# Create your views here.
from django.core.mail import EmailMessage, BadHeaderError
def send(request):
# return HttpResponseRedirect('../')
contact_person = request.POST.get('contact_person', '')
phone = request.POST.get('phone', '')
cargo = request.POST.get('cargo', '')
cod_cargo_railway = request.POST.get('cod_cargo_railway', '')
load_station = request.POST.get('load_station', '')
cod_load_station = request.POST.get('cod_load_station', '')
unloading_station = request.POST.get('unloading_station', '')
cod_unloading_station = request.POST['cod_unloading_station']
contact_message = "Контактна особа: %s, номер телефона: %s, вантаж: %s, код вантажу по залізниці: %s, станція навантаження: %s, код станції навантаження: %s, станція вивантаження: %s, код станція вивантаження: %s"%(contact_person, phone, cargo, cod_cargo_railway, load_station, cod_load_station, unloading_station, cod_unloading_station)
from_addr = '[email protected]'
to_addr = '[email protected]'
username = '[email protected]'
password = 'Sad_!123'
msg = MIMEMultipart()
msg['From'] = from_addr
msg['To'] = to_addr
msg['Subject'] = 'Organization of cargo transportation'
msg.attach(MIMEText(contact_message))
try:
server = smtplib.SMTP_SSL('smtp.ukr.net:2525')
except TimeoutError:
return HttpResponseRedirect('/')
server.login(username,password)
if contact_person and phone and cargo and cod_cargo_railway and load_station and cod_load_station and unloading_station and cod_unloading_station:
try:
server.sendmail(from_addr,to_addr,msg.as_string())
except BadHeaderError:
return HttpResponse('Invalid header found.')
server.quit()
return HttpResponseRedirect('../')
else:
return HttpResponse('Make sure all fields are entered and valid.')
def index(request):
return render(request, 'services/wrapper.html')
def services_search_mobile(request):
seach_text = request.GET['text-field']
seach_names_about = set(["про компанію", "Про компанію", "ПРО Компанію", "ПРО КОМПАНІЮ", "Про компанию", "про компанию", "Про Компанию", "ПРО КОМПАНИЮ", "about company", "About company", "About Company", "ABOUT company", "About COMPANY", "ABOUT COMPANY", "home"])
seach_names_production = set(["продукція", "Продукція", "ПРОДУКЦІЯ", "Продукция", "продукция", "ПРОДУКЦИЯ", "production", "Production", "PRODUCTION"])
seach_names_contact = set(["контакти", "Контакти", "КОНТАКТИ", "Контакты", "контакты", "КОНТАКТЫ", "contact", "Contact", "CONTACT"])
seach_names_reviews = set(["відгуки", "Відгуки", "ВІДГУКИ", "Отзывы", "отзывы", "ОТЗЫВЫ", "reviews", "Reviews", "REVIEWS"])
seach_names_services = set(["наші послуги", "Наші послуги", "НАШІ ПОСЛУГИ", "послуги", "Послуги", "ПОСЛУГИ", "Наши услуги", "наши услуги", "УСЛУГИ", "Услуги", "услуги", "НАШИ УСЛУГИ", "our services", "Our services", "OUR SERVICES", "services", "Services", "SERVICES"])
if seach_text in seach_names_production:
return HttpResponseRedirect("../../production/")
elif seach_text in seach_names_about:
return HttpResponseRedirect("../../")
elif seach_text in seach_names_contact:
return HttpResponseRedirect("../../contact/")
elif seach_text in seach_names_reviews:
return HttpResponseRedirect("../../reviews/")
elif seach_text in seach_names_services:
return HttpResponseRedirect("../../services/")
else:
return render(request, 'error/wrapper.html')
| null |
services/views.py
|
views.py
|
py
| 4,487 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "email.mime.multipart.MIMEMultipart",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "email.mime.text.MIMEText",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "smtplib.SMTP_SSL",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 100,
"usage_type": "call"
}
] |
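services_search_mobile above routes a free-text search by checking the query against large sets of Ukrainian, Russian and English spellings in an if/elif chain. A more compact sketch of the same idea, normalising the query with casefold() and looking the target URL up in a dict (the term lists are abbreviated here):

from django.http import HttpResponseRedirect
from django.shortcuts import render

SEARCH_ROUTES = {
    "production": "../../production/", "продукція": "../../production/",
    "contact": "../../contact/", "контакти": "../../contact/",
    "reviews": "../../reviews/", "відгуки": "../../reviews/",
    "services": "../../services/", "послуги": "../../services/",
    "about company": "../../", "про компанію": "../../",
}

def services_search_mobile(request):
    text = request.GET.get('text-field', '').strip().casefold()
    target = SEARCH_ROUTES.get(text)
    if target:
        return HttpResponseRedirect(target)
    return render(request, 'error/wrapper.html')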
424887088
|
import numpy as np
from sklearn import model_selection, linear_model
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv("../data/machine.data")
df.replace('?', np.nan, inplace=True)            # turn "?" into NaN
df.dropna(inplace=True)                          # drop rows with missing values
df.drop(['vendor_name'], axis=1, inplace=True)   # drop the vendor_name column
df.drop(['Model_Name'], axis=1, inplace=True)    # drop the Model_Name column
X = np.array(df.drop(['PRP'], axis=1))           # drop PRP to form the feature matrix
Y = np.array(df['PRP'])                          # extract PRP as the target
# X is the feature set to split, Y is the corresponding target values
X_train,X_test,Y_train,Y_test = \
model_selection.train_test_split(X, Y, test_size = 0.3)
accuracynum = 0
for j in range(1,11):
regr = linear_model.LinearRegression()
regr.fit(X_train, Y_train)
accuracynum = accuracynum + regr.score(X_test, Y_test)
accuracynum = accuracynum / 10
print( accuracynum )
| null |
hw2/src/linear_regression.py
|
linear_regression.py
|
py
| 926 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model",
"line_number": 22,
"usage_type": "name"
}
] |
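In the loop above, each of the ten iterations refits LinearRegression on the same fixed train/test split, so every iteration yields the identical score and the averaging changes nothing. If the intent was to average performance over different splits, scikit-learn's cross-validation does that directly; a sketch reusing the already-prepared X and Y:

from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score

scores = cross_val_score(LinearRegression(), X, Y, cv=10, scoring='r2')  # ten different folds
print(scores.mean())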
459168950
|
#!/usr/bin/python3
"""Using TensorFlow Lite to detect objects."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import copy
import time
import threading
import cv2
import numpy as np
import paho.mqtt.publish as mqtt_publish
from PIL import Image
from collections import deque
try:
from tflite_runtime import interpreter as tflite
except ImportError:
from tensorflow import lite as tflite
font = cv2.FONT_HERSHEY_SIMPLEX
class ObjectDetector(object):
def __init__(self, model, labels, input_source, width=None, height=None,
history_size=3, threshold=0.5, include_labels=None,
mqtt_broker=None, mqtt_topic='default'):
self._capture_lock = threading.Lock()
self._thread_local = threading.local()
self._thread_local.interpreter = None
self.model = model
self.threshold = threshold
self.input_source = input_source
self.requested_width = width
self.requested_height = height
self.output_width = None
self.output_height = None
self.input_width = None
self.input_height = None
self.results = []
self.known_ids = set()
self.hist_size = history_size
self.hist_objects = deque(maxlen=history_size)
self.curr_object_id = 0
self.include_labels = include_labels
self.labels = self._load_labels(labels)
self.cap = cv2.VideoCapture(self.input_source)
if width:
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.output_width = width
if height:
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
self.output_height = height
self.fps = self.cap.get(cv2.CAP_PROP_FPS)
self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.mqtt = {}
if mqtt_broker:
mqtt_host_port = mqtt_broker.split(':')
self.mqtt['hostname'] = mqtt_host_port[0]
if len(mqtt_host_port) > 1:
self.mqtt['port'] = int(mqtt_host_port[1])
self.mqtt_topic=mqtt_topic
print('''ObjectDetector configuration:
input_source: {input_source}
fps: {fps}
width: {width}
height: {height}
requested_width: {requested_width}
requested_height: {requested_height}
output_width: {output_width}
output_height: {output_height}
mqtt: {mqtt}
mqtt_topic: {mqtt_topic}'''.format(**self.__dict__))
@property
def interpreter(self):
if not hasattr(self._thread_local, 'interpreter') or self._thread_local.interpreter is None:
self._thread_local.interpreter = tflite.Interpreter(self.model)
self._thread_local.interpreter.allocate_tensors()
_, self.input_height, self.input_width, _ = self._thread_local.interpreter.get_input_details()[0]['shape']
print('''
input_width: {}
input_height: {}
'''.format(self.input_height, self.input_width))
return self._thread_local.interpreter
# Step 1 - Get frame (image)
def get_next_video_frame(self):
with self._capture_lock:
return self.cap.read()
# Step 2 - Do analysis
def process_frame(self, frame):
_ = self.interpreter # Initialize the interpreter if not already initialized
#img = cv2.resize(frame, (self.input_width , self.input_height))
#rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
#image = cv2.imencode('.jpg', rgb)[1].tostring()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = Image.fromarray(frame_rgb).convert('RGB').resize(
(self.input_width, self.input_height), Image.ANTIALIAS)
results = self._detect_objects(image)
self.results = copy.deepcopy(results)
return results
# Step 3 - Draw results
def draw_overlay(self, frame, results=None):
if results is None:
results = self.results
self._annotate_objects(frame, results)
@staticmethod
def _load_labels(path):
"""Loads the labels file. Supports files with or without index numbers."""
with open(path, 'r', encoding='utf-8') as f:
lines = f.readlines()
labels = {}
for row_number, content in enumerate(lines):
pair = re.split(r'[:\s]+', content.strip(), maxsplit=1)
if len(pair) == 2 and pair[0].strip().isdigit():
labels[int(pair[0])] = pair[1].strip()
else:
labels[row_number] = pair[0].strip()
return labels
def _detect_objects(self, image):
"""Returns a list of detection results, each a dictionary of object info."""
self._set_input_tensor(image)
self.interpreter.invoke()
# Get all output details
boxes = self._get_output_tensor(0)
classes = self._get_output_tensor(1)
scores = self._get_output_tensor(2)
count = int(self._get_output_tensor(3))
results = []
for i in range(count):
if ((not self.include_labels) or (self.labels[classes[i]] in self.include_labels)) and scores[i] >= self.threshold:
result = {
'bounding_box': boxes[i],
'class_id': classes[i],
'score': scores[i]
}
results.append(result)
self.unique_object_identification(results)
objects = self.objects_in_all_history(results)
obj_ids = set(objects.keys())
new_obj_ids = obj_ids - self.known_ids
for obj in self.get_multi(objects, new_obj_ids).values():
message = "New {} (id: {})".format(self.labels[obj['class_id']], obj['id'])
print(message)
self.mqtt_send_message(message)
#print(f"New {self.labels[obj['class_id']]} (id: {obj['id']})")
self.known_ids |= obj_ids
return results
def _set_input_tensor(self, image):
"""Sets the input tensor."""
tensor_index = self.interpreter.get_input_details()[0]['index']
input_tensor = self.interpreter.tensor(tensor_index)()[0]
input_tensor[:, :] = image
def _get_output_tensor(self, index):
"""Returns the output tensor at the given index."""
output_details = self.interpreter.get_output_details()[index]
tensor = np.squeeze(self.interpreter.get_tensor(output_details['index']))
return tensor
def _annotate_objects(self, frame, results):
"""Draws the bounding box and label for each object in the results."""
for obj in results:
# Convert the bounding box figures from relative coordinates
# to absolute coordinates based on the original resolution
ymin, xmin, ymax, xmax = obj['bounding_box']
xmin = int(xmin * self.width)
xmax = int(xmax * self.width)
ymin = int(ymin * self.height)
ymax = int(ymax * self.height)
# Overlay the box, label, and score on the image
text = '%s %.2f [%s]' % (self.labels[obj['class_id']], obj['score'], obj.get('id', '?'))
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
cv2.putText(frame, text, (xmin+8, ymin+30), font, 1, (0, 255, 0), 2)
### Unique object identification
@staticmethod
def get_center(obj):
ymin, xmin, ymax, xmax = obj['bounding_box']
x = ((xmax - xmin) / 2.0) + xmin
y = ((ymax - ymin) / 2.0) + ymin
return x, y
@staticmethod
def in_bounding_box(x, y, obj):
ymin, xmin, ymax, xmax = obj['bounding_box']
return bool((xmin <= x <= xmax) and (ymin <= y <= ymax))
def find_object_in_history(self, res):
center = self.get_center(res)
for objects in reversed(self.hist_objects):
for obj in objects:
if self.in_bounding_box(*center, obj):
return obj
def is_object_in_all_history(self, hist_ids, obj):
id_ = obj['id']
for ids in hist_ids:
if id_ not in ids:
return False
return True
def objects_in_all_history(self, objects):
hist_ids = [{o['id'] for o in objs} for objs in self.hist_objects]
return {o['id']: o for o in objects if self.is_object_in_all_history(hist_ids, o)}
def get_next_object_id(self):
self.curr_object_id += 1
return self.curr_object_id
@staticmethod
def get_multi(dictionary, keys):
return {k: dictionary[k] for k in keys if k in dictionary}
def unique_object_identification(self, results):
for res in results:
obj = self.find_object_in_history(res)
res['id'] = obj['id'] if obj else self.get_next_object_id()
self.hist_objects.append(results)
# MQTT
def mqtt_send_message(self, message):
if not self.mqtt:
return
try:
mqtt_publish.single(self.mqtt_topic, message, **self.mqtt)
except:
pass
def main():
    # The original main() referenced undefined `args` and `load_labels`; parse arguments here and reuse the class's label loader.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', required=True)
    parser.add_argument('--labels', required=True)
    args = parser.parse_args()
    labels = ObjectDetector._load_labels(args.labels)
    interpreter = tflite.Interpreter(args.model)
    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']
if __name__ == '__main__':
main()
| null |
code/detect_objects.py
|
detect_objects.py
|
py
| 9,598 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "threading.Lock",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "threading.local",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FRAME_WIDTH",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_HEIGHT",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FPS",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_WIDTH",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_HEIGHT",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.lite.Interpreter",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "tensorflow.lite",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "cv2.cvtColor",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.squeeze",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "paho.mqtt.publish.single",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "paho.mqtt.publish",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "tensorflow.lite.Interpreter",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "tensorflow.lite",
"line_number": 268,
"usage_type": "name"
}
] |
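The unique-object identification in detect_objects.py rests on two small geometric helpers: a new detection is treated as the same object as an earlier one when the centre of its bounding box falls inside the earlier box. A standalone check of that logic, with boxes in the relative (ymin, xmin, ymax, xmax) order the class uses:

def get_center(box):
    ymin, xmin, ymax, xmax = box
    return (xmin + xmax) / 2.0, (ymin + ymax) / 2.0

def in_bounding_box(x, y, box):
    ymin, xmin, ymax, xmax = box
    return xmin <= x <= xmax and ymin <= y <= ymax

prev = (0.10, 0.10, 0.50, 0.50)   # detection from an earlier frame
new = (0.15, 0.12, 0.55, 0.52)    # overlapping detection in the current frame
assert in_bounding_box(*get_center(new), prev)  # same object -> the old id is reused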
499778385
|
"""
MicroPython MQTT Eduponics APP Client
https://github.com/STEMinds/micropython-eduponics
MIT License
Copyright (c) 2020 STEMinds
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from Eduponics import umqttsimple,bh1750,bme280
import machine
import time
import json
# set adc (analog to digital) on pin 35
adc = machine.ADC(machine.Pin(35))
# set 11dB input attenuation (voltage range roughly 0.0v - 3.6v)
adc.atten(machine.ADC.ATTN_11DB)
# Configure light sensor
light_sensor = bh1750.BH1750()
# Configure BME280
# setup I2C connection
i2c = machine.I2C(scl=machine.Pin(15), sda=machine.Pin(4))
# Initialize BME280 object with default address 0x76
bme_sensor = bme280.BME280(i2c=i2c)
# initialize dht object, DHT11 coonected to IO19
#d = dht.DHT11(machine.Pin(19))
# define water level sensor as INPUT on IO pin number 21
water_level = machine.Pin(21, machine.Pin.IN)
# define pump on pin IO23 as OUTPUT, define pump state
pump = machine.Pin(23, machine.Pin.OUT)
pump_state = False
# MQTT Unique ID
UUID = "YOUR_UUID_GENERATED_ID"
# MQTT Topics
topics = ["plants/soil","plants/environment","plants/water"]
def handle_water_level(pin):
global pump_state
# water level triggered, turn off the pump
# wait for 0.3 seconds to make sure it's just a little below the water sensor
# else the pump might become unstable
time.sleep(0.3)
pump.value(0)
pump_state = False
def get_soil_moisture():
# sensor min and max values
# can be changed and callibrated
minVal = 710
maxVal = 4095
# read soil moisture sensor data
val = adc.read()
# scale the value based on maxVal and minVal
scale = 100 / (minVal - maxVal)
# get calculated scale
normal_reading = ("%s%s" % (int((val - maxVal) * scale),"%"))
# we can also get inverted value if needed
inverted_reading = ("%s%s" % (int((minVal - val) * scale),"%"))
# for this example we'll return only the normal reading
# put everything in a JSON format suitable for the eduponics app
plant = {
"id":0,
"name":"Plant A",
"enabled":1,
"moisture":normal_reading
}
# return the data
return str(plant).replace("'",'"')
def get_environmental_data():
# get light from the light sensor
lux = int(light_sensor.readLight())
# get bme280 sensor data
bme280_values = bme_sensor.values
temperature = bme280_values[0].replace("C","")
pressure = bme280_values[1]
humidity = bme280_values[2].replace("%","")
# get DHT11 sensor data
# measure sensor data
#d.measure()
# get temperature and humidity
#temperature = d.temperature()
#humidity = d.humidity()
# get water quantity
water_quantity = water_level.value()
# put all this data into a JSON object
data = {
"temp":temperature,
"humidity":humidity,
"sunlight":lux,
"water_quantity":water_quantity
}
return str(data).replace("'",'"')
def water_plant():
global pump_state
if(pump_state or water_level.value() == 1):
# turn off the pump
pump.value(0)
pump_state = False
else:
# turn on the pump
pump.value(1)
pump_state = True
return True
def on_message_callback(topic, msg):
'''
this is a callback, will be called when the app asks for certain information
such as to water the plants when the watering button pressed
'''
# convert topic and message byte to string
topic = str(topic, 'utf-8')
msg = json.loads(str(msg, 'utf-8'))
if(topic == "%s/plants/soil" % UUID or topic == "%s/plants/environment" % UUID):
# Do nothing, we only publish to those topics
pass
elif(topic == "%s/plants/water" % UUID):
# when the app request for plant watering it goes here
if("key" in msg and "status" in msg):
# valid request, let's process it
if(msg["status"] == "pending"):
# it's waiting for us to water it, let's water it
water_plant()
# after watering, publish success message of watering
response = {"key":msg["key"],"status":"ok"}
client.publish("%s/plants/water" % UUID, str(response).replace("'",'"'))
else:
print((topic, msg))
def connect_and_subscribe():
print("[-] Connecting to MQTT client ...")
# set the MQTT broker object
client = umqttsimple.MQTTClient()
# set a callback for incoming messages (subscribed topics)
client.set_callback(on_message_callback)
# connect to the broker
client.connect()
# subscribe to the topics
for topic in topics:
client.subscribe("%s/%s" % (UUID,topic))
print("[-] Subscribed to %s successfully" % topic)
print("[-] Connected to %s MQTT broker successfully" % client.server)
return client
def restart_and_reconnect():
# something went wrong, reconnect in 5 seconds ...
print('[-] Failed to connect to MQTT broker. Reconnecting...')
time.sleep(5)
machine.reset()
try:
client = connect_and_subscribe()
except OSError as e:
restart_and_reconnect()
# configure few variables
last_message = 0
message_interval = 5
# set callback on the water level sensor, if no water stop the pump
water_level.irq(trigger=machine.Pin.IRQ_RISING, handler=handle_water_level)
while True:
try:
# check if there are new messages pending to be processed
# if there are, redirect them to callback on_message_callback()
client.check_msg()
# check if the last published data wasn't less than message_interval
if (time.time() - last_message) > message_interval:
# get soil moisture
soil_moisture = get_soil_moisture()
# publish soil moisture data
client.publish("%s/plants/soil" % UUID, soil_moisture)
#print("[-] published soil moisture")
# update environmetal data
env = get_environmental_data()
client.publish("%s/plants/environment" % UUID, env)
#print("[-] published evironmental data")
# update last message timestamp
last_message = time.time()
except OSError as e:
# if something goes wrong, reconnct to MQTT server
restart_and_reconnect()
| null |
examples/eduponics_mqtt/main.py
|
main.py
|
py
| 7,297 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "machine.ADC",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "machine.Pin",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "machine.ADC",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "Eduponics.bh1750.BH1750",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "Eduponics.bh1750",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "machine.I2C",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "machine.Pin",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "Eduponics.bme280.BME280",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "Eduponics.bme280",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "machine.Pin",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "machine.Pin",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "Eduponics.umqttsimple.MQTTClient",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "Eduponics.umqttsimple",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "machine.reset",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "machine.Pin",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 214,
"usage_type": "call"
}
] |
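The soil-moisture reading in the Eduponics main.py maps the raw ADC value onto a percentage using a calibration range of 710 (wet) to 4095 (dry). A standalone sketch of that scaling, for illustration:

def soil_moisture_percent(raw, min_val=710, max_val=4095):
    # negative slope: a higher (drier) ADC reading gives a lower percentage
    scale = 100 / (min_val - max_val)
    return int((raw - max_val) * scale)

print(soil_moisture_percent(4095))  # 0, the dry end of the calibration
print(soil_moisture_percent(710))   # ~100, the wet end (int() truncation may give 99)
print(soil_moisture_percent(2400))  # a mid-range reading, roughly 50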