class Solution(object):
def distinctNumbers(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
s = {}
n = len(nums)
ans = []
if n < k:
return ans
for i in range(k):
if nums[i] in s:
s[nums[i]] += 1
else:
s[nums[i]] = 1
ans.append(len(s))
for i in range(k, n):
if s[nums[i-k]] == 1:
s.pop(nums[i-k])
else:
s[nums[i-k]] -= 1
if nums[i] in s:
s[nums[i]] += 1
else:
s[nums[i]] = 1
ans.append(len(s))
return ans
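# Minimal usage sketch (illustrative): count the distinct values in each
# sliding window of size k.
#
#   Solution().distinctNumbers([1, 2, 3, 2, 2, 1, 3], k=3)
#   # -> [3, 2, 2, 2, 3]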
|
"""Resolwe SDK for Python."""
from .collection_tables import CollectionTables # noqa
from .resdk_logger import log_to_stdout, start_logging # noqa
from .resolwe import Resolwe, ResolweQuery # noqa
|
import pyeapi
def main():
    pynet_sw2 = pyeapi.connect_to('pynet-sw2')
    show_int = pynet_sw2.enable("show interfaces")
    interfaces = show_int[0]['result']['interfaces']
    # Print the in/out octet counters for the management and Ethernet interfaces.
    for name in ['Management1'] + ['Ethernet{}'.format(i) for i in range(1, 8)]:
        counters = interfaces[name]['interfaceCounters']
        print('{} inOctets: {}'.format(name, counters['inOctets']))
        print('{} outOctets: {}'.format(name, counters['outOctets']))
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
'''
Trainer
Author: Oyesh Mann Singh
'''
import os
from utils.eval import Evaluator
from tqdm import tqdm, tqdm_notebook, tnrange
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import accuracy_score
torch.manual_seed(163)
tqdm.pandas(desc='Progress')
# Decay functions to be used with lr_scheduler
def lr_decay_noam(config):
return lambda t: (
10.0 * config.hidden_dim ** -0.5 * min(
(t + 1) * config.learning_rate_warmup_steps ** -1.5, (t + 1) ** -0.5))
def lr_decay_exp(config):
return lambda t: config.learning_rate_falloff ** t
# Map names to lr decay functions
lr_decay_map = {
'noam': lr_decay_noam,
'exp': lr_decay_exp
}
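# The Trainer below expects config.lr_rate_decay in the form '<scheme>_<range>',
# e.g. 'noam_step' or 'exp_epoch': the first part selects the decay function
# above, the second whether decay is applied every step or every epoch.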
class Trainer:
def __init__(self, config, logger, dataloader, model, k):
self.config = config
self.logger = logger
self.dataloader = dataloader
self.verbose = config.verbose
self.use_pos = config.use_pos
self.train_dl, self.val_dl, self.test_dl = dataloader.load_data(batch_size=config.batch_size)
### DO NOT DELETE
### DEBUGGING PURPOSE
# sample = next(iter(self.train_dl))
# print(sample.TEXT)
# print(sample.LABEL)
# print(sample.POS)
self.train_dlen = len(self.train_dl)
self.val_dlen = len(self.val_dl)
self.test_dlen = len(self.test_dl)
self.model = model
self.epochs = config.epochs
self.loss_fn = nn.NLLLoss()
self.opt = optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()),
lr=config.learning_rate,
weight_decay=config.weight_decay)
self.lr_scheduler_step = self.lr_scheduler_epoch = None
        # Set up learning rate decay scheme
if config.use_lr_decay:
if '_' not in config.lr_rate_decay:
raise ValueError("Malformed learning_rate_decay")
lrd_scheme, lrd_range = config.lr_rate_decay.split('_')
if lrd_scheme not in lr_decay_map:
raise ValueError("Unknown lr decay scheme {}".format(lrd_scheme))
lrd_func = lr_decay_map[lrd_scheme]
lr_scheduler = optim.lr_scheduler.LambdaLR(
self.opt,
lrd_func(config),
last_epoch=-1
)
# For each scheme, decay can happen every step or every epoch
if lrd_range == 'epoch':
self.lr_scheduler_epoch = lr_scheduler
elif lrd_range == 'step':
self.lr_scheduler_step = lr_scheduler
else:
raise ValueError("Unknown lr decay range {}".format(lrd_range))
self.k = k
self.model_name = config.model_name + self.k
self.file_name = self.model_name + '.pth'
self.model_file = os.path.join(config.output_dir, self.file_name)
self.total_train_loss = []
self.total_train_acc = []
self.total_val_loss = []
self.total_val_acc = []
self.early_max_patience = config.early_max_patience
def load_checkpoint(self):
checkpoint = torch.load(self.model_file)
self.model.load_state_dict(checkpoint['state_dict'])
self.opt = checkpoint['opt']
self.opt.load_state_dict(checkpoint['opt_state'])
self.total_train_loss = checkpoint['train_loss']
self.total_train_acc = checkpoint['train_acc']
self.total_val_loss = checkpoint['val_loss']
self.total_val_acc = checkpoint['val_acc']
self.epochs = checkpoint['epochs']
def save_checkpoint(self):
save_parameters = {'state_dict': self.model.state_dict(),
'opt': self.opt,
'opt_state': self.opt.state_dict(),
'train_loss': self.total_train_loss,
'train_acc': self.total_train_acc,
'val_loss': self.total_val_loss,
'val_acc': self.total_val_acc,
'epochs': self.epochs}
torch.save(save_parameters, self.model_file)
def fit(self):
prev_lstm_val_acc = 0.0
prev_val_loss = 100.0
counter = 0
patience_limit = 10
for epoch in tnrange(0, self.epochs):
y_true_train = list()
y_pred_train = list()
total_loss_train = 0
t = tqdm(iter(self.train_dl), leave=False, total=self.train_dlen)
for (k, v) in t:
t.set_description(f'Epoch {epoch + 1}')
self.model.train()
self.opt.zero_grad()
if self.use_pos:
(X, p, y) = k
pred = self.model(X, p)
else:
(X, y) = k
pred = self.model(X, None)
y = y.view(-1)
loss = self.loss_fn(pred, y)
loss.backward()
self.opt.step()
if self.lr_scheduler_step:
self.lr_scheduler_step.step()
t.set_postfix(loss=loss.item())
pred_idx = torch.max(pred, dim=1)[1]
y_true_train += list(y.cpu().data.numpy())
y_pred_train += list(pred_idx.cpu().data.numpy())
total_loss_train += loss.item()
train_acc = accuracy_score(y_true_train, y_pred_train)
train_loss = total_loss_train / self.train_dlen
self.total_train_loss.append(train_loss)
self.total_train_acc.append(train_acc)
if self.val_dl:
y_true_val = list()
y_pred_val = list()
total_loss_val = 0
v = tqdm(iter(self.val_dl), leave=False)
for (k, v) in v:
if self.use_pos:
(X, p, y) = k
pred = self.model(X, p)
else:
(X, y) = k
pred = self.model(X, None)
y = y.view(-1)
loss = self.loss_fn(pred, y)
pred_idx = torch.max(pred, 1)[1]
y_true_val += list(y.cpu().data.numpy())
y_pred_val += list(pred_idx.cpu().data.numpy())
total_loss_val += loss.item()
valacc = accuracy_score(y_true_val, y_pred_val)
valloss = total_loss_val / self.val_dlen
self.logger.info(
f'Epoch {epoch + 1}: train_loss: {train_loss:.4f} train_acc: {train_acc:.4f} | val_loss: {valloss:.4f} val_acc: {valacc:.4f}')
else:
self.logger.info(f'Epoch {epoch + 1}: train_loss: {train_loss:.4f} train_acc: {train_acc:.4f}')
self.total_val_loss.append(valloss)
self.total_val_acc.append(valacc)
if self.lr_scheduler_epoch:
self.lr_scheduler_epoch.step()
if valloss < prev_val_loss:
self.save_checkpoint()
prev_val_loss = valloss
counter = 0
self.logger.info("Best model saved!!!")
else:
counter += 1
if counter >= self.early_max_patience:
self.logger.info("Training stopped because maximum tolerance reached!!!")
break
# Predict
def predict(self):
self.model.eval()
evaluate = Evaluator(self.config, self.logger, self.model, self.dataloader, self.model_name)
self.logger.info("Writing results")
evaluate.write_results()
self.logger.info("Evaluate results")
acc, prec, rec, f1 = evaluate.conll_eval()
return (acc, prec, rec, f1)
# Infer
def infer(self, sent):
"""
        Run inference on a single sentence and return the result.
"""
evaluate = Evaluator(self.config, self.logger, self.model, self.dataloader, self.model_name)
return evaluate.infer(sent)
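# Minimal usage sketch (illustrative only; assumes `config`, `logger`,
# `dataloader`, and `model` objects compatible with the constructor above):
#
#   trainer = Trainer(config, logger, dataloader, model, k='0')
#   trainer.fit()                           # train with early stopping on val loss
#   acc, prec, rec, f1 = trainer.predict()  # CoNLL-style evaluation on the test set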
|
#!/usr/bin/python3 -S
# -*- coding: utf-8 -*-
from cargo.fields import Char
from cargo.validators import LengthBoundsValidator
from unit_tests import configure
from unit_tests.validators.Validator import TestValidator
class TestLengthBoundsValidator(TestValidator):
field = Char(minlen=2, maxlen=5, validator=LengthBoundsValidator)
def setUp(self):
self.field.maxlen = 5
self.field.minlen = 2
self.field.clear()
def test_validate_empty(self):
self.assertFalse(self.field.validate())
self.field.minlen = 0
self.assertTrue(self.field.validate())
def test_validate_string(self):
self.field('foo')
self.assertTrue(self.field.validate())
def test_minlen_violation(self):
self.field('f')
self.assertFalse(self.field.validate())
self.assertEqual(LengthBoundsValidator.MINLEN_CODE,
self.field.validator.code)
def test_maxlen_violation(self):
self.field('abcbwe')
self.assertFalse(self.field.validate())
self.assertEqual(LengthBoundsValidator.MAXLEN_CODE,
self.field.validator.code)
def test_validate_none(self):
self.field(None)
self.assertFalse(self.field.validate())
self.field.minlen = 0
self.assertTrue(self.field.validate())
if __name__ == '__main__':
# Unit test
configure.run_tests(TestLengthBoundsValidator, failfast=True, verbosity=2)
|
#!/usr/bin/env python3
import sys
n = int(sys.argv[1])
peak_pos = int(sys.argv[2])
zeros = int(sys.argv[3])
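# Build an array of length n with a dominant peak (value n+1) at peak_pos and
# 1s placed every (zeros+1) positions on either side of it, so that exactly
# `zeros` zeros separate consecutive non-zero entries.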
a = [0]*n
a[peak_pos] = n+1
for i in range(zeros+1, n, zeros+1):
if peak_pos+i < n:
a[peak_pos+i] = 1
if peak_pos-i >= 0:
a[peak_pos-i] = 1
print(len(a))
print(' '.join(map(str, a)))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-03-19 14:10
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('kiddyer', '0002_authusergroups'),
]
operations = [
migrations.RenameModel(
old_name='AuthUserGroups',
new_name='AuthUserGroup',
),
]
|
from sympy import symbols, GreaterThan, simplify, solve
def main():
s_0, s_1, s_2 = symbols('s_0 s_1 s_2')
x_b_0, x_b_1, x_b_2 = symbols('x_b_0 x_b_1 x_b_2')
y_b_0, y_b_1, y_b_2 = symbols('y_b_0 y_b_1 y_b_2')
x, y = symbols('x y')
p_0_x = s_0 * x + x_b_0
p_1_x = s_1 * x + x_b_1
p_2_x = s_2 * x + x_b_2
p_0_y = s_0 * y + y_b_0
p_1_y = s_1 * y + y_b_1
p_2_y = s_2 * y + y_b_2
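    # The left-hand side below is the 2D cross product of (p1 - p0) and (p2 - p1);
    # it is non-negative exactly when p0, p1, p2 make a left (counter-clockwise)
    # turn or are collinear.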
expr = GreaterThan(
(p_1_x - p_0_x) * (p_2_y - p_1_y) - (p_1_y - p_0_y) * (p_2_x - p_1_x),
0)
print(simplify(solve(expr, y)))
if __name__ == "__main__":
main()
|
try:
import simplejson as json
except ImportError:
import json
import functools
from cgi import parse_header
def wrap_json(func=None, *, encoder=json.JSONEncoder, preserve_raw_body=False):
"""
A middleware that parses the body of json requests and
encodes the json responses.
NOTE: this middleware exists just for backward compatibility,
but it has some limitations in terms of response body encoding
    because it only accepts list or dictionary outputs, while the JSON
    specification also allows other values.
    It is recommended to use `wrap_json_body` and `wrap_json_response`
    instead of this.
"""
if func is None:
return functools.partial(
wrap_json,
encoder=encoder,
preserve_raw_body=preserve_raw_body
)
wrapped_func = wrap_json_body(func, preserve_raw_body=preserve_raw_body)
wrapped_func = wrap_json_response(wrapped_func, encoder=encoder)
return wrapped_func
def wrap_json_body(func=None, *, preserve_raw_body=False):
"""
A middleware that parses the body of json requests and
    adds it to the request under the `body` attribute (replacing
    the previous value). The original value can be preserved in
    a new attribute `raw_body` by passing preserve_raw_body=True.
"""
if func is None:
return functools.partial(
wrap_json_body,
preserve_raw_body=preserve_raw_body
)
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if preserve_raw_body:
request.raw_body = request.body
if ctype == "application/json":
request.body = json.loads(request.body.decode("utf-8")) if request.body else None
return func(request, *args, **kwargs)
return wrapper
def wrap_json_params(func):
"""
A middleware that parses the body of json requests and
    adds it to the request under the `params` attribute.
"""
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if ctype == "application/json":
request.params = json.loads(request.body.decode("utf-8")) if request.body else None
return func(request, *args, **kwargs)
return wrapper
def wrap_json_response(func=None, *, encoder=json.JSONEncoder):
"""
    A middleware that encodes the response body as JSON when the
    "Content-Type" header is "application/json".
    This middleware accepts an optional `encoder` parameter that
    allows the user to specify a custom JSON encoder class.
"""
if func is None:
return functools.partial(wrap_json_response, encoder=encoder)
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
response = func(request, *args, **kwargs)
if "Content-Type" in response.headers and response.headers['Content-Type'] is not None:
ctype, pdict = parse_header(response.headers.get('Content-Type', ''))
if ctype == "application/json" and (isinstance(response.body, dict) or isinstance(response.body, list)):
response.body = json.dumps(response.body, cls=encoder)
return response
return wrapper
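# Minimal usage sketch (illustrative; `Response` and the request/response
# objects with `headers` and `body` attributes are assumptions, not part of
# this module):
#
#   @wrap_json
#   def handler(request):
#       name = request.body.get("name", "world") if request.body else "world"
#       return Response(body={"greeting": "hello " + name},
#                       headers={"Content-Type": "application/json"})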
|
import lxml.html
import sqlite3
from urllib.request import urlopen
from urllib.parse import urlencode
from os import environ
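# Fetch the listings page, extract the ad links, diff them against the ads
# already stored in SQLite, and send each new link to a Telegram chat via the Bot API.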
html = urlopen(environ['KRISHAKZBOT_URL'], timeout=60).read().decode('utf-8')
doc = lxml.html.fromstring(html)
top_ads = [(link.get('href'),) for link in doc.cssselect('.a-card__title')]
conn = sqlite3.connect('krisha.db')
conn.execute('create table if not exists ad(url text)')
conn.execute('create temp table top_ad(url text)')
conn.executemany('insert into top_ad(url) values (?)', top_ads)
new_ads = conn.execute('select url from top_ad except select url from ad').fetchall()
for (url,) in new_ads:
try:
urlopen('https://api.telegram.org/bot' + environ['KRISHAKZBOT_TGBOTTOKEN'] + '/sendMessage?' + urlencode({
'chat_id': environ['KRISHAKZBOT_TGCHATID'],
'text': 'https://krisha.kz' + url,
}), timeout=60)
    except Exception as err:
print(err)
else:
conn.execute('insert into ad(url) values(?)', (url,))
conn.commit()
|
import graphene
from django_prices.templatetags import prices
class Money(graphene.ObjectType):
currency = graphene.String(description="Currency code.", required=True)
amount = graphene.Float(description="Amount of money.", required=True)
localized = graphene.String(
description="Money formatted according to the current locale.",
required=True,
deprecation_reason=(
"Price formatting according to the current locale should be handled by the "
"frontend client. This field will be removed after 2020-07-31."
),
)
class Meta:
description = "Represents amount of money in specific currency."
@staticmethod
def resolve_localized(root, _info):
return prices.amount(root)
class TaxedMoney(graphene.ObjectType):
currency = graphene.String(description="Currency code.", required=True)
gross = graphene.Field(
Money, description="Amount of money including taxes.", required=True
)
net = graphene.Field(
Money, description="Amount of money without taxes.", required=True
)
tax = graphene.Field(Money, description="Amount of taxes.", required=True)
class Meta:
description = (
"Represents a monetary value with taxes. In cases where taxes were not "
"applied, net and gross values will be equal."
)
class MoneyRange(graphene.ObjectType):
start = graphene.Field(Money, description="Lower bound of a price range.")
stop = graphene.Field(Money, description="Upper bound of a price range.")
class Meta:
description = "Represents a range of amounts of money."
|
import os
import locale
import codecs
import nose
import numpy as np
from numpy.testing import assert_equal
import pandas as pd
from pandas import date_range, Index
import pandas.util.testing as tm
from pandas.tools.util import cartesian_product, to_numeric
CURRENT_LOCALE = locale.getlocale()
LOCALE_OVERRIDE = os.environ.get('LOCALE_OVERRIDE', None)
class TestCartesianProduct(tm.TestCase):
def test_simple(self):
x, y = list('ABC'), [1, 22]
result = cartesian_product([x, y])
expected = [np.array(['A', 'A', 'B', 'B', 'C', 'C']),
np.array([1, 22, 1, 22, 1, 22])]
assert_equal(result, expected)
def test_datetimeindex(self):
# regression test for GitHub issue #6439
# make sure that the ordering on datetimeindex is consistent
x = date_range('2000-01-01', periods=2)
result = [Index(y).day for y in cartesian_product([x, x])]
expected = [np.array([1, 1, 2, 2]), np.array([1, 2, 1, 2])]
assert_equal(result, expected)
class TestLocaleUtils(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestLocaleUtils, cls).setUpClass()
cls.locales = tm.get_locales()
if not cls.locales:
raise nose.SkipTest("No locales found")
tm._skip_if_windows()
@classmethod
def tearDownClass(cls):
super(TestLocaleUtils, cls).tearDownClass()
del cls.locales
def test_get_locales(self):
# all systems should have at least a single locale
assert len(tm.get_locales()) > 0
def test_get_locales_prefix(self):
if len(self.locales) == 1:
raise nose.SkipTest("Only a single locale found, no point in "
"trying to test filtering locale prefixes")
first_locale = self.locales[0]
assert len(tm.get_locales(prefix=first_locale[:2])) > 0
def test_set_locale(self):
if len(self.locales) == 1:
raise nose.SkipTest("Only a single locale found, no point in "
"trying to test setting another locale")
if LOCALE_OVERRIDE is not None:
lang, enc = LOCALE_OVERRIDE.split('.')
else:
lang, enc = 'it_CH', 'UTF-8'
enc = codecs.lookup(enc).name
new_locale = lang, enc
if not tm._can_set_locale(new_locale):
with tm.assertRaises(locale.Error):
with tm.set_locale(new_locale):
pass
else:
with tm.set_locale(new_locale) as normalized_locale:
new_lang, new_enc = normalized_locale.split('.')
new_enc = codecs.lookup(enc).name
normalized_locale = new_lang, new_enc
self.assertEqual(normalized_locale, new_locale)
current_locale = locale.getlocale()
self.assertEqual(current_locale, CURRENT_LOCALE)
class TestToNumeric(tm.TestCase):
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
with tm.assertRaises(ValueError):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
with tm.assertRaises(ValueError):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
def test_type_check(self):
# GH 11776
df = pd.DataFrame({'a': [1, -3.14, 7], 'b': ['4', '5', '6']})
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_numeric(df)
for errors in ['ignore', 'raise', 'coerce']:
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_numeric(df, errors=errors)
def test_scalar(self):
self.assertEqual(pd.to_numeric(1), 1)
self.assertEqual(pd.to_numeric(1.1), 1.1)
self.assertEqual(pd.to_numeric('1'), 1)
self.assertEqual(pd.to_numeric('1.1'), 1.1)
with tm.assertRaises(ValueError):
to_numeric('XX', errors='raise')
self.assertEqual(to_numeric('XX', errors='ignore'), 'XX')
self.assertTrue(np.isnan(to_numeric('XX', errors='coerce')))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
def test_datetimelike(self):
for tz in [None, 'US/Eastern', 'Asia/Tokyo']:
idx = pd.date_range('20130101', periods=3, tz=tz, name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_timedelta(self):
idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_period(self):
idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
# ToDo: enable when we can support native PeriodDtype
# res = pd.to_numeric(pd.Series(idx, name='xxx'))
# tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
#!/usr/bin/env python
"""
check lexicons
check the phase1 or phase2 output lexicon files
for debugging purposes
"""
import os
import sys
from struct import unpack, calcsize
def main(filename):
    """ main routine """
    lex_schema = 'iiih'
    if not os.path.exists(filename):
        print('error: no such file:', filename)
        return
    rec_size = calcsize(lex_schema)
    fd = open(filename, 'rb')
    try:
        while True:
            data = fd.read(rec_size)
            if len(data) < rec_size:
                break
            print(unpack(lex_schema, data))
    except IOError:
        # to handle the piped output to head,
        # like cmd | head
        pass
    finally:
        fd.close()
if __name__ == '__main__':
main(sys.argv[1])
|
"""coding=utf-8
Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import tensorflow as tf
import time
class LogEvalRunHook(tf.train.SessionRunHook):
"""
report latency and throughput during eval.
"""
def __init__(self, global_batch_size, hvd_rank=-1):
self.global_batch_size = global_batch_size
self.hvd_rank = hvd_rank
self.total_time = 0.0
self.count = 0
self.skipped = 0
self.time_list = []
self.t0 = 0
    def before_run(self, run_context):
"""
before run, set time.
"""
self.t0 = time.time()
    def after_run(self, run_context, run_values):
"""
after run, update time.
"""
elapsed_secs = time.time() - self.t0
self.count += 1
# Removing first 2 (arbitrary) number of startup iterations from perf evaluations
if self.count <= 2:
print("Skipping time record for ", self.count, " due to overhead")
self.skipped += 1
else:
self.time_list.append(elapsed_secs)
self.total_time += elapsed_secs
class LogTrainRunHook(tf.train.SessionRunHook):
"""
report throughput during training.
"""
def __init__(self,
global_batch_size,
hvd_rank=-1,
save_checkpoints_steps=1000):
self.global_batch_size = global_batch_size
self.hvd_rank = hvd_rank
self.save_checkpoints_steps = save_checkpoints_steps
self.total_time = 0.0
self.count = 0 # Holds number of iterations, including skipped iterations for fp16 loss scaling
self.global_step = 0
self.init_global_step = 0
self.skipped = 0
self.t0 = time.time()
    def after_create_session(self, session, coord):
"""
init step.
"""
self.init_global_step = session.run(tf.train.get_global_step())
    def before_run(self, run_context):
"""
before run, reset time.
"""
self.t0 = time.time()
return tf.train.SessionRunArgs(fetches=['step_update:0'])
    def after_run(self, run_context, run_values):
"""
after run, update env.
"""
elapsed_secs = time.time() - self.t0
self.global_step = run_values.results[0]
self.count += 1
# Removing first step + first two steps after every checkpoint save
if (self.global_step -
self.init_global_step) % self.save_checkpoints_steps <= 1:
print("Skipping time record for ", self.global_step,
" due to checkpoint-saving/warmup overhead")
else:
self.total_time += elapsed_secs
    def end(self, session):
"""
end of run, update step.
"""
num_global_steps = self.global_step - self.init_global_step
self.skipped = (num_global_steps // self.save_checkpoints_steps) * 2 + \
min(2, num_global_steps % self.save_checkpoints_steps) - 1
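# Minimal usage sketch (illustrative; assumes an existing tf.estimator.Estimator
# `estimator` and a compatible `input_fn`):
#
#   train_hook = LogTrainRunHook(global_batch_size=32, save_checkpoints_steps=1000)
#   estimator.train(input_fn=input_fn, hooks=[train_hook], max_steps=1000)
#
#   eval_hook = LogEvalRunHook(global_batch_size=32)
#   estimator.evaluate(input_fn=input_fn, hooks=[eval_hook])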
|
import argparse
import operator
import sys
import os
import setGPU
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from data import get_batch
from meta_optimizer import MetaModel, MetaOptimizer, FastMetaOptimizer
from model import Model
from torch.autograd import Variable
from torchvision import datasets, transforms
from tqdm import tqdm
parser = argparse.ArgumentParser(description='PyTorch meta-optimizer example')
parser.add_argument('--outdir', type=str, default='training_dir', metavar='N',
help='directory where outputs are saved ')
parser.add_argument('--batch_size', type=int, default=32, metavar='N',
help='batch size (default: 32)')
parser.add_argument('--optimizer_steps', type=int, default=20, metavar='N',
                    help='number of meta optimizer steps (default: 20)')
parser.add_argument('--truncated_bptt_step', type=int, default=20, metavar='N',
help='step at which it truncates bptt (default: 20)')
parser.add_argument('--updates_per_epoch', type=int, default=10, metavar='N',
                    help='updates per epoch (default: 10)')
parser.add_argument('--max_epoch', type=int, default=10000, metavar='N',
help='number of epoch (default: 10000)')
parser.add_argument('--hidden_size', type=int, default=10, metavar='N',
help='hidden size of the meta optimizer (default: 10)')
parser.add_argument('--num_layers', type=int, default=2, metavar='N',
help='number of LSTM layers (default: 2)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--train_split', type=float, default=0.8, metavar='N',
help='fraction of data going to training')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
assert args.optimizer_steps % args.truncated_bptt_step == 0
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
os.makedirs(args.outdir, exist_ok=True)
def main():
# Create a meta optimizer that wraps a model into a meta model
# to keep track of the meta updates.
meta_model = Model()
if args.cuda:
meta_model.cuda()
meta_optimizer = FastMetaOptimizer(MetaModel(meta_model), args.num_layers, args.hidden_size)
if args.cuda:
meta_optimizer.cuda()
optimizer = optim.Adam(meta_optimizer.parameters(), lr=1e-3)
l_val_model_best = 99999
l_val_meta_model_best = 99999
for epoch in range(args.max_epoch):
print("Epoch %s\n" % epoch)
decrease_in_loss = 0.0
final_loss = 0.0
train_iter = iter(train_loader)
loss_train_model = []
loss_train_meta = []
loss_val_model = []
loss_val_meta = []
correct = 0
incorrect = 0
#for i in tqdm(range(args.updates_per_epoch)):
#updates = int(float(args.train_split) * len(train_loader) /(((args.optimizer_steps // args.truncated_bptt_step) * args.truncated_bptt_step) + 1))
updates = int(float(args.train_split) * len(train_loader))
for i in tqdm(range(updates)):
# Sample a new model
model = Model()
if args.cuda:
model.cuda()
x, y = next(train_iter)
if args.cuda:
x, y = x.cuda(), y.cuda()
x, y = Variable(x), Variable(y)
# Compute initial loss of the model
f_x = model(x)
initial_loss = F.nll_loss(f_x, y)
for k in range(args.optimizer_steps // args.truncated_bptt_step):
# Keep states for truncated BPTT
meta_optimizer.reset_lstm(
keep_states=k > 0, model=model, use_cuda=args.cuda)
loss_sum = 0
prev_loss = torch.zeros(1)
if args.cuda:
prev_loss = prev_loss.cuda()
for j in range(args.truncated_bptt_step):
#x, y = next(train_iter)
if args.cuda:
x, y = x.cuda(), y.cuda()
x, y = Variable(x), Variable(y)
# First we need to compute the gradients of the model
f_x = model(x)
loss = F.nll_loss(f_x, y)
loss_train_model.append(loss.item())
model.zero_grad()
loss.backward()
                    # Perform a meta update using gradients from model
# and return the current meta model saved in the optimizer
meta_model = meta_optimizer.meta_update(model, loss.data)
                    # Compute a loss for a step of the meta optimizer
f_x = meta_model(x)
loss = F.nll_loss(f_x, y)
loss_sum += (loss - Variable(prev_loss))
prev_loss = loss.data
# Update the parameters of the meta optimizer
meta_optimizer.zero_grad()
loss_train_meta.append(loss_sum.item())
loss_sum.backward()
for param in meta_optimizer.parameters():
param.grad.data.clamp_(-1, 1)
optimizer.step()
# Compute relative decrease in the loss function w.r.t initial
# value
decrease_in_loss += loss.item() / initial_loss.item()
final_loss += loss.item()
for i in tqdm(range(int((1-args.train_split) * len(train_loader)))):
x, y = next(train_iter)
if args.cuda:
x, y = x.cuda(), y.cuda()
x, y = Variable(x), Variable(y)
# Compute initial loss of the model
f_x = model(x)
            for index, output in enumerate(f_x.cpu().detach().numpy()):
if y[index] == output.argmax():
correct += 1
else:
incorrect += 1
loss_model = F.nll_loss(f_x, y)
loss_val_model.append(loss_model.item())
meta_model = meta_optimizer.meta_update(model, loss.data)
            # Compute a loss for a step of the meta optimizer
f_x = meta_model(x)
loss_meta = F.nll_loss(f_x, y)
loss_val_meta.append(loss_meta.item())
torch.save(model.state_dict(), '%s/%s_last.pth'%(args.outdir,'model'))
torch.save(meta_model.state_dict(), '%s/%s_last.pth'%(args.outdir,'meta_model'))
l_val_model = np.mean(loss_val_model)
l_val_meta_model = np.mean(loss_val_meta)
if l_val_model < l_val_model_best:
print("new best model")
l_val_model_best = l_val_model
torch.save(model.state_dict(), '%s/%s_best.pth'%(args.outdir,'model'))
if l_val_meta_model < l_val_meta_model_best:
print("new best meta-model")
l_val_meta_model_best = l_val_meta_model
torch.save(meta_model.state_dict(), '%s/%s_best.pth'%(args.outdir,'meta_model'))
#print("Epoch: {}, final loss {}, average final/initial loss ratio: {}".format(epoch, final_loss / args.updates_per_epoch, decrease_in_loss / args.updates_per_epoch))
        print('\nValidation Loss Model: ' + str(np.mean(loss_val_model)))
        print('\nValidation Loss Meta: ' + str(np.mean(loss_val_meta)))
        print('\nValidation Accuracy: ' + str(float(correct) / (correct + incorrect)))
        print('\nTraining Loss Model: ' + str(np.mean(loss_train_model)))
        print('\nTraining Loss Meta: ' + str(np.mean(loss_train_meta)))
if __name__ == "__main__":
main()
|
from __future__ import print_function
import xlsxwriter
import xml.etree.ElementTree as ET  # ElementTree for parsing the .nessus XML input
import sys
if len(sys.argv) < 3:
    print("\nAn input .nessus file and an output .xlsx file must both be specified on the command line.\n"
          + "Usage: python_excel_writer.py infile.nessus outfile.xlsx\n"
          + "Quitting....\n")
    exit()
elif ".nessus" not in sys.argv[1]:
print("\nYour input filename did not contain a valid .nessus extension\n"
+ "Usage: python_excel_writer.py infile.nessus outfile.xlsx\n"
+ "Quitting....\n")
exit()
elif ".xlsx" not in sys.argv[2]:
print("\nYour output filename did not contain a valid .xlsx extension\n"
+ "Usage: python_excel_writer.py infile.nessus outfile.xlsx\n"
+ "Quitting....\n")
exit()
#friendly naming for sys args
input_file = sys.argv[1]
output_file = sys.argv[2]
#print ("The arguments are: " + str(sys.argv))
workbook = xlsxwriter.Workbook(output_file)
worksheet = workbook.add_worksheet()
#worksheet.write('A1', 'Hello world')
# close the workbook so the (currently empty) .xlsx file is actually written to disk
workbook.close()
|
import copy
import importlib
import itertools
import logging
import os
import sys
import numpy as np
import pandas as pd
from pandas.api.types import is_integer
from pathlib import Path
try:
# used by pytest tmpdir
from py._path.local import LocalPath
FILE_TYPE = (str, Path, LocalPath)
except ImportError:
LocalPath = None
FILE_TYPE = (str, Path)
from tempfile import TemporaryDirectory
from pyam.slice import IamSlice
from pyam.filter import filter_by_time_domain, filter_by_year, filter_by_dt_arg
try:
from datapackage import Package
HAS_DATAPACKAGE = True
except ImportError:
Package = None
HAS_DATAPACKAGE = False
try:
import ixmp
ixmp.TimeSeries
has_ix = True
except (ImportError, AttributeError):
has_ix = False
from pyam.run_control import run_control
from pyam.utils import (
write_sheet,
read_file,
read_pandas,
format_data,
merge_meta,
find_depth,
pattern_match,
to_list,
isstr,
islistable,
print_list,
s,
DEFAULT_META_INDEX,
META_IDX,
IAMC_IDX,
SORT_IDX,
ILLEGAL_COLS,
)
from pyam.filter import (
datetime_match,
)
from pyam.read_ixmp import read_ix
from pyam.plotting import PlotAccessor
from pyam.compute import IamComputeAccessor
from pyam._compare import _compare
from pyam.aggregation import (
_aggregate,
_aggregate_region,
_aggregate_time,
_aggregate_recursive,
_group_and_agg,
)
from pyam._ops import _op_data
from pyam.units import convert_unit
from pyam.index import (
get_index_levels,
get_index_levels_codes,
get_keep_col,
verify_index_integrity,
replace_index_values,
)
from pyam.time import swap_time_for_year, swap_year_for_time
from pyam._debiasing import _compute_bias
from pyam.logging import raise_data_error
logger = logging.getLogger(__name__)
class IamDataFrame(object):
"""Scenario timeseries data and meta indicators
The class provides a number of diagnostic features (including validation of
data, completeness of variables provided), processing tools (e.g.,
unit conversion), as well as visualization and plotting tools.
Parameters
----------
data : :class:`pandas.DataFrame`, :class:`ixmp.Scenario`,\
or file-like object as str or :class:`pathlib.Path`
Scenario timeseries data following the IAMC data format or
a supported variation as pandas object, a path to a file,
or a scenario of an ixmp instance.
meta : :class:`pandas.DataFrame`, optional
A dataframe with suitable 'meta' indicators for the new instance.
The index will be downselected to scenarios present in `data`.
index : list, optional
Columns to use for resulting IamDataFrame index.
kwargs
If `value=<col>`, melt column `<col>` to 'value' and use `<col>` name
as 'variable'; or mapping of required columns (:code:`IAMC_IDX`) to
any of the following:
- one column in `data`
- multiple columns, to be concatenated by :code:`|`
- a string to be used as value for this column
Notes
-----
A :class:`pandas.DataFrame` can have the required dimensions
as columns or index.
R-style integer column headers (i.e., `X2015`) are acceptable.
When initializing an :class:`IamDataFrame` from an xlsx file,
|pyam| will per default parse all sheets starting with 'data'
for timeseries and a sheet 'meta' to populate the respective table.
Sheet names can be specified with kwargs :code:`sheet_name` ('data')
and :code:`meta_sheet_name` ('meta'), where
values can be a string or a list and '*' is interpreted as a wildcard.
Calling the class with :code:`meta_sheet_name=False` will
skip the import of the 'meta' table.
When initializing an :class:`IamDataFrame` from an object that is already
an :class:`IamDataFrame` instance, the new object will be hard-linked to
all attributes of the original object - so any changes on one object
(e.g., with :code:`inplace=True`) may also modify the other object!
This is intended behaviour and consistent with pandas but may be confusing
for those who are not used to the pandas/Python universe.
"""
def __init__(self, data, meta=None, index=DEFAULT_META_INDEX, **kwargs):
"""Initialize an instance of an IamDataFrame"""
if isinstance(data, IamDataFrame):
if kwargs:
raise ValueError(
f"Invalid arguments for initializing from IamDataFrame: {kwargs}"
)
if index != data.index.names:
msg = f"Incompatible `index={index}` with {type(data)} "
raise ValueError(msg + f"(index={data.index.names})")
for attr, value in data.__dict__.items():
setattr(self, attr, value)
else:
self._init(data, meta, index=index, **kwargs)
def _init(self, data, meta=None, index=DEFAULT_META_INDEX, **kwargs):
"""Process data and set attributes for new instance"""
# pop kwarg for meta_sheet_name (prior to reading data from file)
meta_sheet = kwargs.pop("meta_sheet_name", "meta")
# if meta is given explicitly, verify that index matches
if meta is not None and not meta.index.names == index:
raise ValueError(
f"Incompatible `index={index}` with `meta` "
f"(index={meta.index.names})!"
)
# cast data from pandas
if isinstance(data, pd.DataFrame) or isinstance(data, pd.Series):
_data = format_data(data.copy(), index=index, **kwargs)
# read data from ixmp Platform instance
elif has_ix and isinstance(data, ixmp.TimeSeries):
# TODO read meta indicators from ixmp
_data = read_ix(data, **kwargs)
# read from file
elif isinstance(data, FILE_TYPE):
data = Path(data) # casting str or LocalPath to Path
if not data.is_file():
raise FileNotFoundError(f"No such file: '{data}'")
logger.info(f"Reading file {data}")
_data = read_file(data, index=index, **kwargs)
# unsupported `data` args
elif islistable(data):
raise ValueError(
"Initializing from list is not supported, "
"use `IamDataFrame.append()` or `pyam.concat()`"
)
else:
raise ValueError("IamDataFrame constructor not properly called!")
self._data, index, self.time_col, self.extra_cols = _data
# define `meta` dataframe for categorization & quantitative indicators
self.meta = pd.DataFrame(index=_make_index(self._data, cols=index))
self.reset_exclude()
# if given explicitly, merge meta dataframe after downselecting
if meta is not None:
meta = meta.loc[self.meta.index.intersection(meta.index)]
self.meta = merge_meta(meta, self.meta, ignore_conflict=True)
# if initializing from xlsx, try to load `meta` table from file
if meta_sheet and isinstance(data, Path) and data.suffix == ".xlsx":
excel_file = pd.ExcelFile(data, engine="openpyxl")
if meta_sheet in excel_file.sheet_names:
self.load_meta(excel_file, sheet_name=meta_sheet, ignore_conflict=True)
self._set_attributes()
# execute user-defined code
if "exec" in run_control():
self._execute_run_control()
# add the `plot` and `compute` handlers
self.plot = PlotAccessor(self)
self._compute = None
def _set_attributes(self):
"""Utility function to set attributes, called on __init__/filter/append/..."""
# add/reset internal time-index attribute (set when first using `time`)
setattr(self, "_time", None)
# add/reset year attribute (only if time domain is year, i.e., all integer)
if self.time_col == "year":
setattr(self, "year", get_index_levels(self._data, "year"))
# add/reset internal time domain attribute (set when first using `time_domain`)
setattr(self, "_time_domain", None)
# set non-standard index columns as attributes
for c in self.meta.index.names:
if c not in META_IDX:
setattr(self, c, get_index_levels(self.meta, c))
# set extra data columns as attributes
for c in self.extra_cols:
setattr(self, c, get_index_levels(self._data, c))
def _finalize(self, data, append, **args):
"""Append `data` to `self` or return as new IamDataFrame with copy of `meta`"""
if append:
self.append(data, **args, inplace=True)
else:
if data is None or data.empty:
return _empty_iamframe(self.dimensions + ["value"])
return IamDataFrame(data, meta=self.meta, **args)
def __getitem__(self, key):
_key_check = [key] if isstr(key) else key
if isinstance(key, IamSlice):
return IamDataFrame(self._data.loc[key])
elif key == "value":
return pd.Series(self._data.values, name="value")
elif set(_key_check).issubset(self.meta.columns):
return self.meta.__getitem__(key)
else:
return self.get_data_column(key)
def __len__(self):
return len(self._data)
def __repr__(self):
return self.info()
@property
def compute(self):
"""Access to advanced computation methods, see :class:`IamComputeAccessor`"""
if self._compute is None:
self._compute = IamComputeAccessor(self)
return self._compute
def info(self, n=80, meta_rows=5, memory_usage=False):
"""Print a summary of the object index dimensions and meta indicators
Parameters
----------
n : int
The maximum line length
meta_rows : int
            The maximum number of meta indicators printed
        memory_usage : bool, optional
            If True, add the memory usage of the object (in bytes) to the summary
        """
# concatenate list of index dimensions and levels
info = f"{type(self)}\nIndex:\n"
c1 = max([len(i) for i in self.dimensions]) + 1
c2 = n - c1 - 5
info += "\n".join(
[
f" * {i:{c1}}: {print_list(getattr(self, i), c2)}"
for i in self.index.names
]
)
# concatenate list of index of _data (not in index.names)
info += "\nTimeseries data coordinates:\n"
info += "\n".join(
[
f" {i:{c1}}: {print_list(getattr(self, i), c2)}"
for i in self.dimensions
if i not in self.index.names
]
)
# concatenate list of (head of) meta indicators and levels/values
def print_meta_row(m, t, lst):
_lst = print_list(lst, n - len(m) - len(t) - 7)
return f" {m} ({t}) {_lst}"
info += "\nMeta indicators:\n"
info += "\n".join(
[
print_meta_row(m, t, self.meta[m].unique())
for m, t in zip(
self.meta.columns[0:meta_rows], self.meta.dtypes[0:meta_rows]
)
]
)
# print `...` if more than `meta_rows` columns
if len(self.meta.columns) > meta_rows:
info += "\n ..."
# add info on size (optional)
if memory_usage:
size = self._data.memory_usage() + sum(self.meta.memory_usage())
info += f"\nMemory usage: {size} bytes"
return info
def _execute_run_control(self):
for module_block in run_control()["exec"]:
fname = module_block["file"]
functions = module_block["functions"]
dirname = os.path.dirname(fname)
if dirname:
sys.path.append(dirname)
module = os.path.basename(fname).split(".")[0]
mod = importlib.import_module(module)
for func in functions:
f = getattr(mod, func)
f(self)
@property
def index(self):
"""Return all model-scenario combinations as :class:`pandas.MultiIndex`
The index allows to loop over the available model-scenario combinations
using:
.. code-block:: python
for model, scenario in df.index:
...
"""
return self.meta.index
@property
def model(self):
"""Return the list of (unique) model names"""
return self._get_meta_index_levels("model")
@property
def scenario(self):
"""Return the list of (unique) scenario names"""
return self._get_meta_index_levels("scenario")
def _get_meta_index_levels(self, name):
"""Return the list of a level from meta"""
if name in self.meta.index.names:
return get_index_levels(self.meta, name)
# in case of non-standard meta.index.names
raise KeyError(f"Index `{name}` does not exist!")
@property
def region(self):
"""Return the list of (unique) regions"""
return get_index_levels(self._data, "region")
@property
def variable(self):
"""Return the list of (unique) variables"""
return get_index_levels(self._data, "variable")
@property
def unit(self):
"""Return the list of (unique) units"""
return get_index_levels(self._data, "unit")
@property
def unit_mapping(self):
"""Return a dictionary of variables to (list of) corresponding units"""
def list_or_str(x):
x = list(x.drop_duplicates())
return x if len(x) > 1 else x[0]
return (
pd.DataFrame(
zip(self.get_data_column("variable"), self.get_data_column("unit")),
columns=["variable", "unit"],
)
.groupby("variable")
.apply(lambda u: list_or_str(u.unit))
.to_dict()
)
@property
def time(self):
"""The time index, i.e., axis labels related to the time domain.
Returns
-------
- A :class:`pandas.Int64Index` if the :attr:`time_domain` is 'year'
- A :class:`pandas.DatetimeIndex` if the :attr:`time_domain` is 'datetime'
- A :class:`pandas.Index` if the :attr:`time_domain` is 'mixed'
"""
if self._time is None:
self._time = pd.Index(
self._data.index.unique(level=self.time_col).values, name="time"
)
return self._time
@property
def data(self):
"""Return the timeseries data as a long :class:`pandas.DataFrame`"""
if self.empty: # reset_index fails on empty with `datetime` column
return pd.DataFrame([], columns=self.dimensions + ["value"])
return self._data.reset_index()
def get_data_column(self, column):
"""Return a `column` from the timeseries data in long format
Equivalent to :meth:`IamDataFrame.data[column] <IamDataFrame.data>`.
Parameters
----------
column : str
The column name.
Returns
-------
pd.Series
"""
return pd.Series(self._data.index.get_level_values(column), name=column)
@property
def dimensions(self):
"""Return the list of `data` columns (index names & data coordinates)"""
return list(self._data.index.names)
@property
def time_domain(self):
"""Indicator of the time domain: 'year', 'datetime', or 'mixed'"""
if self._time_domain is None:
if self.time_col == "year":
self._time_domain = "year"
elif isinstance(self.time, pd.DatetimeIndex):
self._time_domain = "datetime"
else:
self._time_domain = "mixed"
return self._time_domain
def copy(self):
"""Make a deepcopy of this object
See :func:`copy.deepcopy` for details.
"""
return copy.deepcopy(self)
def head(self, *args, **kwargs):
"""Identical to :meth:`pandas.DataFrame.head()` operating on data"""
return self.data.head(*args, **kwargs)
def tail(self, *args, **kwargs):
"""Identical to :meth:`pandas.DataFrame.tail()` operating on data"""
return self.data.tail(*args, **kwargs)
@property
def empty(self):
"""Indicator whether this object is empty"""
return self._data.empty
def equals(self, other):
"""Test if two objects contain the same data and meta indicators
This function allows two IamDataFrame instances to be compared against
each other to see if they have the same timeseries data and meta
        indicators. NaN values in the same location of the meta table are considered
equal.
Parameters
----------
other : IamDataFrame
the other :class:`IamDataFrame` to be compared with `self`
"""
if not isinstance(other, IamDataFrame):
raise ValueError("`other` is not an `IamDataFrame` instance")
if compare(self, other).empty and self.meta.equals(other.meta):
return True
else:
return False
def append(
self,
other,
ignore_meta_conflict=False,
inplace=False,
verify_integrity=True,
**kwargs,
):
"""Append any IamDataFrame-like object to this object
Indicators in `other.meta` that are not in `self.meta` are merged.
Missing values are set to `NaN`.
Conflicting `data` rows always raise a `ValueError`.
Parameters
----------
other : IamDataFrame, ixmp.Scenario, pandas.DataFrame or data file
Any object castable as IamDataFrame to be appended
ignore_meta_conflict : bool, optional
If False and `other` is an IamDataFrame, raise an error if
any meta columns present in `self` and `other` are not identical.
inplace : bool, optional
If True, do operation inplace and return None
verify_integrity : bool, optional
If True, verify integrity of index
kwargs
Passed to :class:`IamDataFrame(other, **kwargs) <IamDataFrame>`
if `other` is not already an IamDataFrame
Returns
-------
IamDataFrame
If *inplace* is :obj:`False`.
None
If *inplace* is :obj:`True`.
Raises
------
ValueError
If time domain or other timeseries data index dimension don't match.
"""
if other is None:
return None if inplace else self.copy()
if not isinstance(other, IamDataFrame):
other = IamDataFrame(other, **kwargs)
ignore_meta_conflict = True
if self.extra_cols != other.extra_cols:
raise ValueError("Incompatible timeseries data index dimensions")
if other.empty:
return None if inplace else self.copy()
ret = self.copy() if not inplace else self
if ret.time_col != other.time_col:
if ret.time_col == "year":
ret.swap_year_for_time(inplace=True)
else:
other = other.swap_year_for_time(inplace=False)
# merge `meta` tables
ret.meta = merge_meta(ret.meta, other.meta, ignore_meta_conflict)
# append other.data (verify integrity for no duplicates)
_data = pd.concat([ret._data, other._data])
if verify_integrity:
verify_index_integrity(_data)
# merge extra columns in `data`
ret.extra_cols += [i for i in other.extra_cols if i not in ret.extra_cols]
ret._data = _data.sort_index()
ret._set_attributes()
if not inplace:
return ret
def pivot_table(
self,
index,
columns,
values="value",
aggfunc="count",
fill_value=None,
style=None,
):
"""Returns a pivot table
Parameters
----------
index : str or list of str
rows for Pivot table
columns : str or list of str
columns for Pivot table
values : str, default 'value'
dataframe column to aggregate or count
aggfunc : str or function, default 'count'
function used for aggregation,
accepts 'count', 'mean', and 'sum'
fill_value : scalar, default None
value to replace missing values with
style : str, default None
output style for pivot table formatting
accepts 'highlight_not_max', 'heatmap'
"""
index = [index] if isstr(index) else index
columns = [columns] if isstr(columns) else columns
if values != "value":
raise ValueError("This method only supports `values='value'`!")
df = self._data
# allow 'aggfunc' to be passed as string for easier user interface
if isstr(aggfunc):
if aggfunc == "count":
df = self._data.groupby(index + columns).count()
fill_value = 0
elif aggfunc == "mean":
df = self._data.groupby(index + columns).mean().round(2)
fill_value = 0 if style == "heatmap" else ""
elif aggfunc == "sum":
df = self._data.groupby(index + columns).sum()
fill_value = 0 if style == "heatmap" else ""
df = df.unstack(level=columns, fill_value=fill_value)
return df
def interpolate(self, time, inplace=False, **kwargs):
"""Interpolate missing values in the timeseries data
This method uses :meth:`pandas.DataFrame.interpolate`,
which applies linear interpolation by default
Parameters
----------
time : int or datetime, or list-like thereof
Year or :class:`datetime.datetime` to be interpolated.
This must match the datetime/year format of `self`.
inplace : bool, optional
if True, do operation inplace and return None
kwargs
passed to :meth:`pandas.DataFrame.interpolate`
"""
ret = self.copy() if not inplace else self
interp_kwargs = dict(method="slinear", axis=1)
interp_kwargs.update(kwargs)
time = to_list(time)
# TODO - have to explicitly cast to numpy datetime to sort later,
# could enforce as we do for year below
if self.time_col == "time":
time = list(map(np.datetime64, time))
elif not all(is_integer(x) for x in time):
raise ValueError(f"The `time` argument {time} contains non-integers")
old_cols = list(ret[ret.time_col].unique())
columns = np.unique(np.concatenate([old_cols, time]))
# calculate a separate dataframe with full interpolation
df = ret.timeseries()
newdf = df.reindex(columns=columns).interpolate(**interp_kwargs)
# replace only columns asked for
for col in time:
df[col] = newdf[col]
# replace underlying data object
# TODO naming time_col could be done in timeseries()
df.columns.name = ret.time_col
df = df.stack() # long-data to pd.Series
df.name = "value"
ret._data = df.sort_index()
ret._set_attributes()
if not inplace:
return ret
def swap_time_for_year(self, subannual=False, inplace=False):
"""Convert the `time` dimension to `year` (as integer).
Parameters
----------
subannual : bool, str or func, optional
Merge non-year components of the "time" domain as new column "subannual".
Apply :meth:`strftime() <datetime.date.strftime>` on the values of the
"time" domain using `subannual` (if a string) or "%m-%d %H:%M%z" (if True).
If it is a function, apply the function on the values of the "time" domain.
inplace : bool, optional
If True, do operation inplace and return None.
Returns
-------
:class:`IamDataFrame` or **None**
Object with altered time domain or None if `inplace=True`.
Raises
------
ValueError
"time" is not a column of `self.data`
See Also
--------
swap_year_for_time
"""
return swap_time_for_year(self, subannual=subannual, inplace=inplace)
def swap_year_for_time(self, inplace=False):
"""Convert the `year` and `subannual` dimensions to `time` (as datetime).
The method applies :meth:`dateutil.parser.parse` on the combined columns
`year` and `subannual`:
.. code-block:: python
dateutil.parser.parse([f"{y}-{s}" for y, s in zip(year, subannual)])
Parameters
----------
inplace : bool, optional
If True, do operation inplace and return None.
Returns
-------
:class:`IamDataFrame` or **None**
Object with altered time domain or None if `inplace=True`.
Raises
------
ValueError
"year" or "subannual" are not a column of `self.data`
See Also
--------
swap_time_for_year
"""
return swap_year_for_time(self, inplace=inplace)
def as_pandas(self, meta_cols=True):
"""Return object as a pandas.DataFrame
Parameters
----------
        meta_cols : list of str or bool, optional (default: True)
            join `data` with all `meta` columns if True (default),
            with only the columns in the list, or return a copy of `data` if False
"""
# merge data and (downselected) meta, or return copy of data
if meta_cols:
meta_cols = self.meta.columns if meta_cols is True else meta_cols
return (
self.data.set_index(META_IDX).join(self.meta[meta_cols]).reset_index()
)
else:
return self.data.copy()
def timeseries(self, iamc_index=False):
"""Returns `data` as :class:`pandas.DataFrame` in wide format
Parameters
----------
iamc_index : bool, optional
if True, use `['model', 'scenario', 'region', 'variable', 'unit']`;
else, use all 'data' columns
Raises
------
ValueError
`IamDataFrame` is empty
ValueError
reducing to IAMC-index yields an index with duplicates
"""
if self.empty:
raise ValueError("This IamDataFrame is empty!")
s = self._data
if iamc_index:
if self.time_col == "time":
raise ValueError(
"Cannot use `iamc_index=True` with 'datetime' time-domain!"
)
s = s.droplevel(self.extra_cols)
return s.unstack(level=self.time_col).rename_axis(None, axis=1)
def reset_exclude(self):
"""Reset exclusion assignment for all scenarios to `exclude: False`"""
self.meta["exclude"] = False
def set_meta(self, meta, name=None, index=None):
"""Add meta indicators as pandas.Series, list or value (int/float/str)
Parameters
----------
meta : pandas.Series, list, int, float or str
column to be added to 'meta'
(by `['model', 'scenario']` index if possible)
name : str, optional
meta column name (defaults to meta `pandas.Series.name`);
either `meta.name` or the name kwarg must be defined
index : IamDataFrame, pandas.DataFrame or pandas.MultiIndex, optional
index to be used for setting meta column (`['model', 'scenario']`)
"""
# check that name is valid and doesn't conflict with data columns
if (name or (hasattr(meta, "name") and meta.name)) in [None, False]:
raise ValueError("Must pass a name or use a named pd.Series")
name = name or meta.name
if name in self.dimensions:
raise ValueError(f"Column {name} already exists in `data`!")
if name in ILLEGAL_COLS:
raise ValueError(f"Name {name} is illegal for meta indicators!")
# check if meta has a valid index and use it for further workflow
if (
hasattr(meta, "index")
and hasattr(meta.index, "names")
and set(META_IDX).issubset(meta.index.names)
):
index = meta.index
# if no valid index is provided, add meta as new column `name` and exit
if index is None:
self.meta[name] = list(meta) if islistable(meta) else meta
return # EXIT FUNCTION
# use meta.index if index arg is an IamDataFrame
if isinstance(index, IamDataFrame):
index = index.meta.index
# turn dataframe to index if index arg is a DataFrame
if isinstance(index, pd.DataFrame):
index = index.set_index(META_IDX).index
if not isinstance(index, pd.MultiIndex):
raise ValueError("Index cannot be coerced to pd.MultiIndex")
# raise error if index is not unique
if index.duplicated().any():
raise ValueError("Non-unique ['model', 'scenario'] index!")
# create pd.Series from meta, index and name if provided
meta = pd.Series(data=meta, index=index, name=name)
# reduce index dimensions to model-scenario only
meta = meta.reset_index().reindex(columns=META_IDX + [name]).set_index(META_IDX)
# check if trying to add model-scenario index not existing in self
diff = meta.index.difference(self.meta.index)
if not diff.empty:
raise ValueError(f"Adding meta for non-existing scenarios:\n{diff}")
self._new_meta_column(name)
self.meta[name] = meta[name].combine_first(self.meta[name])
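    # Usage sketch (editor's illustration; `df` and the indicator names are assumptions):
    #
    #     df.set_meta(meta=1.0, name="my-indicator")       # same value for all scenarios
    #     idx = pd.MultiIndex.from_tuples(
    #         [("model_a", "scen_a"), ("model_a", "scen_b")], names=["model", "scenario"]
    #     )
    #     df.set_meta(pd.Series([1, 2], index=idx, name="rank"))   # per-scenario values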
def set_meta_from_data(self, name, method=None, column="value", **kwargs):
"""Add meta indicators from downselected timeseries data of self
Parameters
----------
name : str
column name of the 'meta' table
method : function, optional
method for aggregation
(e.g., :func:`numpy.max <numpy.ndarray.max>`);
required if downselected data do not yield unique values
column : str, optional
the column from `data` to be used to derive the indicator
kwargs
passed to :meth:`filter` for downselected data
"""
_data = self.filter(**kwargs).data
if method is None:
meta = _data.set_index(META_IDX)[column]
else:
meta = _data.groupby(META_IDX)[column].apply(method)
self.set_meta(meta, name)
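    # Usage sketch (editor's illustration; the variable and indicator names are assumptions):
    #
    #     import numpy as np
    #     df.set_meta_from_data(
    #         "peak CO2 emissions", method=np.max, variable="Emissions|CO2"
    #     )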
def categorize(
self, name, value, criteria, color=None, marker=None, linestyle=None
):
"""Assign scenarios to a category according to specific criteria
Parameters
----------
name : str
column name of the 'meta' table
value : str
category identifier
criteria : dict
dictionary with variables mapped to applicable checks
('up' and 'lo' for respective bounds, 'year' for years - optional)
color : str, optional
assign a color to this category for plotting
marker : str, optional
assign a marker to this category for plotting
linestyle : str, optional
assign a linestyle to this category for plotting
"""
# add plotting run control
for kind, arg in [
("color", color),
("marker", marker),
("linestyle", linestyle),
]:
if arg:
run_control().update({kind: {name: {value: arg}}})
# find all data that matches categorization
rows = _apply_criteria(self._data, criteria, in_range=True, return_test="all")
idx = _make_index(rows, cols=self.index.names)
if len(idx) == 0:
logger.info("No scenarios satisfy the criteria")
return # EXIT FUNCTION
# update meta dataframe
self._new_meta_column(name)
self.meta.loc[idx, name] = value
msg = "{} scenario{} categorized as `{}: {}`"
logger.info(msg.format(len(idx), "" if len(idx) == 1 else "s", name, value))
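    # Usage sketch (editor's illustration; criteria values and names are assumptions):
    #
    #     df.categorize(
    #         "climate-category", "low",
    #         criteria={"Emissions|CO2": {"up": 1000, "year": 2050}},
    #         color="green",
    #     )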
def _new_meta_column(self, name):
"""Add a column to meta if it doesn't exist, set value to nan"""
if name is None:
raise ValueError(f"Cannot add a meta column {name}")
if name not in self.meta:
self.meta[name] = np.nan
def require_variable(self, variable, unit=None, year=None, exclude_on_fail=False):
"""Check whether all scenarios have a required variable
Parameters
----------
variable : str
required variable
unit : str, default None
name of unit (optional)
year : int or list, default None
check whether the variable exists for ANY of the years (if a list)
exclude_on_fail : bool, default False
flag scenarios missing the required variables as `exclude: True`
"""
criteria = {"variable": variable}
if unit:
criteria.update({"unit": unit})
if year:
criteria.update({"year": year})
keep = self._apply_filters(**criteria)
idx = self.meta.index.difference(_meta_idx(self.data[keep]))
n = len(idx)
if n == 0:
logger.info(
"All scenarios have the required variable `{}`".format(variable)
)
return
msg = (
"{} scenario does not include required variable `{}`"
if n == 1
else "{} scenarios do not include required variable `{}`"
)
if exclude_on_fail:
self.meta.loc[idx, "exclude"] = True
msg += ", marked as `exclude: True` in `meta`"
logger.info(msg.format(n, variable))
return pd.DataFrame(index=idx).reset_index()
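    # Usage sketch (editor's illustration; the variable name is an assumption):
    #
    #     df.require_variable("Primary Energy", year=2030, exclude_on_fail=True)
    #     # scenarios without 'Primary Energy' in 2030 are flagged as `exclude: True`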
def validate(self, criteria={}, exclude_on_fail=False):
"""Validate scenarios using criteria on timeseries values
Returns all scenarios which do not match the criteria and prints a log
message, or returns None if all scenarios match the criteria.
When called with `exclude_on_fail=True`, scenarios not
satisfying the criteria will be marked as `exclude=True`.
Parameters
----------
criteria : dict
dictionary with variable keys and validation mappings
('up' and 'lo' for respective bounds, 'year' for years)
exclude_on_fail : bool, optional
flag scenarios failing validation as `exclude: True`
Returns
-------
:class:`pandas.DataFrame`
All data points that do not satisfy the criteria.
None
If all scenarios satisfy the criteria.
"""
df = _apply_criteria(self._data, criteria, in_range=False)
if not df.empty:
msg = "{} of {} data points do not satisfy the criteria"
logger.info(msg.format(len(df), len(self.data)))
if exclude_on_fail and len(df) > 0:
self._exclude_on_fail(df)
return df.reset_index()
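    # Usage sketch (editor's illustration; criteria values are assumptions):
    #
    #     df.validate(
    #         criteria={"Primary Energy": {"up": 100, "year": 2030}},
    #         exclude_on_fail=True,
    #     )
    #     # returns the data points above the bound, or None if all scenarios pass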
def compute_bias(self, name, method, axis):
"""Compute the bias weights and add to 'meta'
Parameters
----------
name : str
Column name in the 'meta' dataframe
method : str
Method to compute the bias weights, see the notes
axis : str
Index dimensions on which to apply the `method`
Notes
-----
The following methods are implemented:
- "count": use the inverse of the number of scenarios grouped by `axis` names.
Using the following method on an IamDataFrame with three scenarios
.. code-block:: python
df.compute_bias(name="bias-weight", method="count", axis="scenario")
results in the following column to be added to *df.meta*:
.. list-table::
:header-rows: 1
* - model
- scenario
- bias-weight
* - model_a
- scen_a
- 0.5
* - model_a
- scen_b
- 1
* - model_b
- scen_a
- 0.5
"""
_compute_bias(self, name, method, axis)
def rename(
self, mapping=None, inplace=False, append=False, check_duplicates=True, **kwargs
):
"""Rename any index dimension or data coordinate.
When renaming models or scenarios, the uniqueness of the index must be
maintained, and the function will raise an error otherwise.
        Renaming is only applied to data rows that match all columns given in
        `mapping`. Renaming can be applied either to the `model` and `scenario`
        columns or to other data coordinates, but not to both simultaneously.
Parameters
----------
mapping : dict or kwargs
mapping of column name to rename-dictionary of that column
.. code-block:: python
dict(<column_name>: {<current_name_1>: <target_name_1>,
<current_name_2>: <target_name_2>})
or kwargs as `column_name={<current_name_1>: <target_name_1>, ...}`
inplace : bool, optional
Do operation inplace and return None.
append : bool, optional
Whether to append aggregated timeseries data to this instance
(if `inplace=True`) or to a returned new instance (if `inplace=False`).
check_duplicates : bool, optional
Check whether conflicts exist after renaming of timeseries data coordinates.
If True, raise a ValueError; if False, rename and merge
with :meth:`groupby().sum() <pandas.core.groupby.GroupBy.sum>`.
Returns
-------
:class:`IamDataFrame` or **None**
Aggregated timeseries data as new object or None if `inplace=True`.
"""
# combine `mapping` arg and mapping kwargs, ensure no rename conflicts
mapping = mapping or {}
duplicate = set(mapping).intersection(kwargs)
if duplicate:
raise ValueError(f"Conflicting rename args for columns {duplicate}")
mapping.update(kwargs)
# return without any changes if self is empty
if self.empty:
return self if inplace else self.copy()
# determine columns that are not in the meta index
meta_idx = self.meta.index.names
data_cols = set(self.dimensions) - set(meta_idx)
# changing index and data columns can cause model-scenario mismatch
if any(i in mapping for i in meta_idx) and any(i in mapping for i in data_cols):
msg = "Renaming index and data columns simultaneously not supported!"
raise ValueError(msg)
# translate rename mapping to `filter()` arguments
filters = {col: _from.keys() for col, _from in mapping.items()}
# if append is True, downselect and append renamed data
if append:
df = self.filter(**filters)
# note that `append(other, inplace=True)` returns None
return self.append(df.rename(mapping), inplace=inplace)
# if append is False, iterate over rename mapping and do groupby
ret = self.copy() if not inplace else self
# renaming is only applied where a filter matches for all given columns
rows = ret._apply_filters(**filters)
idx = ret.meta.index.isin(_make_index(ret._data[rows], cols=meta_idx))
# apply renaming changes (for `data` only on the index)
_data_index = ret._data.index
for col, _mapping in mapping.items():
if col in meta_idx:
_index = pd.DataFrame(index=ret.meta.index).reset_index()
_index.loc[idx, col] = _index.loc[idx, col].replace(_mapping)
if _index.duplicated().any():
raise ValueError(f"Renaming to non-unique {col} index!")
ret.meta.index = _index.set_index(meta_idx).index
elif col not in data_cols:
raise ValueError(f"Renaming by {col} not supported!")
_data_index = replace_index_values(_data_index, col, _mapping, rows)
# check if duplicates exist in the new timeseries data index
duplicate_rows = _data_index.duplicated()
has_duplicates = any(duplicate_rows)
if has_duplicates and check_duplicates:
raise_data_error(
"Conflicting data rows after renaming "
"(use `aggregate()` or `check_duplicates=False` instead)",
_data_index[duplicate_rows].to_frame(index=False),
)
ret._data.index = _data_index
ret._set_attributes()
# merge using `groupby().sum()` only if duplicates exist
if has_duplicates:
ret._data = ret._data.reset_index().groupby(ret.dimensions).sum().value
if not inplace:
return ret
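    # Usage sketch (editor's illustration; the renamed items are assumptions):
    #
    #     df.rename(scenario={"scen_a": "scenario_a"}, inplace=True)     # index column
    #     df.rename(variable={"Primary Energy|Coal": "PE|Coal"})         # data coordinate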
def convert_unit(
self, current, to, factor=None, registry=None, context=None, inplace=False
):
"""Convert all timeseries data having *current* units to new units.
If *factor* is given, existing values are multiplied by it, and the
*to* units are assigned to the 'unit' column.
Otherwise, the :mod:`pint` package is used to convert from *current* ->
*to* units without an explicit conversion factor. Pint natively handles
conversion between any standard (SI) units that have compatible
dimensionality, such as exajoule to terawatt-hours, :code:`EJ -> TWh`,
or tonne per year to gram per second, :code:`t / yr -> g / sec`.
The default *registry* includes additional unit definitions relevant
for integrated assessment models and energy systems analysis, via the
`iam-units <https://github.com/IAMconsortium/units>`_ package.
This registry can also be accessed directly, using:
.. code-block:: python
from iam_units import registry
When using this registry, *current* and *to* may contain the symbols of
greenhouse gas (GHG) species, such as 'CO2e', 'C', 'CH4', 'N2O',
'HFC236fa', etc., as well as lower-case aliases like 'co2' supported by
:mod:`pyam`. In this case, *context* must be the name of a specific
global warming potential (GWP) metric supported by :mod:`iam_units`,
        e.g. 'AR5GWP100' (optionally prefixed by 'gwp_', e.g. 'gwp_AR5GWP100').
Rows with units other than *current* are not altered.
Parameters
----------
current : str
Current units to be converted.
to : str
New unit (to be converted to) or symbol for target GHG species. If
only the GHG species is provided, the units (e.g. :code:`Mt /
year`) will be the same as `current`, and an expression combining
units and species (e.g. 'Mt CO2e / yr') will be placed in the
'unit' column.
factor : value, optional
Explicit factor for conversion without `pint`.
registry : :class:`pint.UnitRegistry`, optional
Specific unit registry to use for conversion. Default: the
`iam-units <https://github.com/IAMconsortium/units>`_ registry.
context : str or :class:`pint.Context`, optional
(Name of) the context to use in conversion.
Required when converting between GHG species using GWP metrics,
unless the species indicated by *current* and *to* are the same.
        inplace : bool, optional
            If True, do operation inplace and return None;
            otherwise return a new IamDataFrame.
Returns
-------
IamDataFrame
If *inplace* is :obj:`False`.
None
If *inplace* is :obj:`True`.
Raises
------
pint.UndefinedUnitError
if attempting a GWP conversion but *context* is not given.
pint.DimensionalityError
without *factor*, when *current* and *to* are not compatible units.
"""
# check that (only) either factor or registry/context is provided
if factor and any([registry, context]):
raise ValueError("Use either `factor` or `registry`!")
return convert_unit(self, current, to, factor, registry, context, inplace)
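    # Usage sketch (editor's illustration; units, species and factor are assumptions):
    #
    #     df.convert_unit("EJ/yr", "TWh/yr")                  # via the pint/iam-units registry
    #     df.convert_unit("Mt CH4/yr", "Mt CO2e/yr",
    #                     context="AR5GWP100")                # GHG conversion needs a GWP context
    #     df.convert_unit("US$2010/t", "EUR/t", factor=0.9)   # explicit factor, no pint lookup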
def normalize(self, inplace=False, **kwargs):
"""Normalize data to a specific data point
Note: Currently only supports normalizing to a specific time.
Parameters
----------
inplace : bool, optional
if :obj:`True`, do operation inplace and return None
kwargs
the column and value on which to normalize (e.g., `year=2005`)
"""
if len(kwargs) > 1 or self.time_col not in kwargs:
raise ValueError("Only time(year)-based normalization supported")
ret = self.copy() if not inplace else self
df = ret.data
# change all below if supporting more in the future
cols = self.time_col
value = kwargs[self.time_col]
x = df.set_index(IAMC_IDX)
x["value"] /= x[x[cols] == value]["value"]
x = x.reset_index()
ret._data = x.set_index(self.dimensions).value
if not inplace:
return ret
def offset(self, padding=0, fill_value=None, inplace=False, **kwargs):
"""Compute new data which is offset from a specific data point
For example, offsetting from `year=2005` will provide data
*relative* to `year=2005` such that the value in 2005 is 0 and
all other values `value[year] - value[2005]`.
Conceptually this operation performs as:
        .. code-block:: python
            df - df.filter(**kwargs) + padding
        Note: Currently only supports offsetting from a specific time.
Parameters
----------
padding : float, optional
an additional offset padding
fill_value : float or None, optional
            Applied on subtraction. Fills existing missing (NaN) values. See
https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.subtract.html
inplace : bool, optional
if :obj:`True`, do operation inplace and return None
kwargs
the column and value on which to offset (e.g., `year=2005`)
"""
if len(kwargs) > 1 or self.time_col not in kwargs:
raise ValueError("Only time(year)-based normalization supported")
ret = self.copy() if not inplace else self
data = ret._data
value = kwargs[self.time_col]
base_value = data.loc[data.index.isin([value], level=self.time_col)].droplevel(
self.time_col
)
ret._data = data.subtract(base_value, fill_value=fill_value) + padding
if not inplace:
return ret
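    # Usage sketch (editor's illustration):
    #
    #     df.offset(year=2005)              # values relative to 2005, so value[2005] == 0
    #     df.offset(year=2005, padding=1)   # shift the baseline so value[2005] == 1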
def aggregate(
self,
variable,
components=None,
method="sum",
recursive=False,
append=False,
):
"""Aggregate timeseries data by components or subcategories within each region
Parameters
----------
variable : str or list of str
Variable(s) for which the aggregate will be computed.
components : list of str, optional
            Components to be aggregated, defaults to all subcategories of `variable`.
method : func or str, optional
Aggregation method, e.g. :func:`numpy.mean`, :func:`numpy.sum`, 'min', 'max'
recursive : bool or str, optional
Iterate recursively (bottom-up) over all subcategories of `variable`.
If there are existing intermediate variables, it validates the aggregated
value.
If recursive='skip-validate', it skips the validation.
append : bool, optional
Whether to append aggregated timeseries data to this instance.
Returns
-------
:class:`IamDataFrame` or **None**
Aggregated timeseries data or None if `append=True`.
See Also
--------
add : Add timeseries data items along an `axis`.
aggregate_region : Aggregate timeseries data along the `region` dimension.
Notes
-----
The aggregation function interprets any missing values (:any:`numpy.nan`)
for individual components as 0.
"""
if recursive:
if components is not None:
raise ValueError("Recursive aggregation cannot take `components`!")
if method != "sum":
raise ValueError(
"Recursive aggregation only supported with `method='sum'`!"
)
_df = IamDataFrame(
_aggregate_recursive(self, variable, recursive), meta=self.meta
)
else:
_df = _aggregate(self, variable, components=components, method=method)
# append to `self` or return as `IamDataFrame`
return self._finalize(_df, append=append)
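    # Usage sketch (editor's illustration; the variable name is an assumption):
    #
    #     df.aggregate("Primary Energy", append=True)       # sum of direct sub-categories
    #     df.aggregate("Primary Energy", recursive=True)    # bottom-up over the full hierarchy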
def check_aggregate(
self,
variable,
components=None,
method="sum",
exclude_on_fail=False,
multiplier=1,
**kwargs,
):
"""Check whether timeseries data matches the aggregation by its components
Parameters
----------
variable : str or list of str
variable(s) checked for matching aggregation of sub-categories
components : list of str, default None
list of variables, defaults to all sub-categories of `variable`
method : func or str, optional
method to use for aggregation,
e.g. :func:`numpy.mean`, :func:`numpy.sum`, 'min', 'max'
exclude_on_fail : bool, optional
flag scenarios failing validation as `exclude: True`
multiplier : number, optional
factor when comparing variable and sum of components
kwargs : arguments for comparison of values
passed to :func:`numpy.isclose`
"""
# compute aggregate from components, return None if no components
df_components = _aggregate(self, variable, components, method)
if df_components is None:
return
# filter and groupby data, use `pd.Series.align` for matching index
rows = self._apply_filters(variable=variable)
df_var, df_components = _group_and_agg(self._data[rows], [], method).align(
df_components
)
# use `np.isclose` for checking match
rows = ~np.isclose(df_var, multiplier * df_components, **kwargs)
# if aggregate and components don't match, return inconsistent data
if sum(rows):
msg = "`{}` - {} of {} rows are not aggregates of components"
logger.info(msg.format(variable, sum(rows), len(df_var)))
if exclude_on_fail:
self._exclude_on_fail(_meta_idx(df_var[rows].reset_index()))
return pd.concat(
[df_var[rows], df_components[rows]],
axis=1,
keys=(["variable", "components"]),
)
def aggregate_region(
self,
variable,
region="World",
subregions=None,
components=False,
method="sum",
weight=None,
append=False,
drop_negative_weights=True,
):
"""Aggregate timeseries data by a number of subregions
        Setting `components=True` allows adding variable sub-categories
        that are only defined at the `region` level.
Parameters
----------
variable : str or list of str
variable(s) to be aggregated
region : str, optional
region to which data will be aggregated
subregions : list of str, optional
list of subregions, defaults to all regions other than `region`
components : bool or list of str, optional
variables at the `region` level to be included in the aggregation
(ignored if False); if `True`, use all sub-categories of `variable`
included in `region` but not in any of the `subregions`;
or explicit list of variables
method : func or str, optional
method to use for aggregation,
e.g. :func:`numpy.mean`, :func:`numpy.sum`, 'min', 'max'
weight : str, optional
variable to use as weight for the aggregation
(currently only supported with `method='sum'`)
append : bool, optional
append the aggregate timeseries to `self` and return None,
else return aggregate timeseries as new :class:`IamDataFrame`
drop_negative_weights : bool, optional
removes any aggregated values that are computed using negative weights
Returns
-------
:class:`IamDataFrame` or **None**
Aggregated timeseries data or None if `append=True`.
See Also
--------
add : Add timeseries data items `a` and `b` along an `axis`
aggregate : Aggregate timeseries data along the `variable` hierarchy.
"""
_df = _aggregate_region(
self,
variable,
region=region,
subregions=subregions,
components=components,
method=method,
weight=weight,
drop_negative_weights=drop_negative_weights,
)
# append to `self` or return as `IamDataFrame`
return self._finalize(_df, append=append, region=region)
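    # Usage sketch (editor's illustration; variable and region names are assumptions):
    #
    #     df.aggregate_region("Primary Energy", region="World", append=True)
    #     df.aggregate_region("Primary Energy", region="World",
    #                         subregions=["region_a", "region_b"])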
def check_aggregate_region(
self,
variable,
region="World",
subregions=None,
components=False,
method="sum",
weight=None,
exclude_on_fail=False,
drop_negative_weights=True,
**kwargs,
):
"""Check whether timeseries data matches the aggregation across subregions
Parameters
----------
variable : str or list of str
variable(s) to be checked for matching aggregation of subregions
region : str, optional
region to be checked for matching aggregation of subregions
subregions : list of str, optional
list of subregions, defaults to all regions other than `region`
components : bool or list of str, optional
variables at the `region` level to be included in the aggregation
(ignored if False); if `True`, use all sub-categories of `variable`
included in `region` but not in any of the `subregions`;
or explicit list of variables
method : func or str, optional
method to use for aggregation,
e.g. :func:`numpy.mean`, :func:`numpy.sum`, 'min', 'max'
weight : str, optional
variable to use as weight for the aggregation
(currently only supported with `method='sum'`)
        exclude_on_fail : bool, optional
flag scenarios failing validation as `exclude: True`
drop_negative_weights : bool, optional
removes any aggregated values that are computed using negative weights
kwargs : arguments for comparison of values
passed to :func:`numpy.isclose`
"""
# compute aggregate from subregions, return None if no subregions
df_subregions = _aggregate_region(
self,
variable,
region,
subregions,
components,
method,
weight,
drop_negative_weights,
)
if df_subregions is None:
return
# filter and groupby data, use `pd.Series.align` for matching index
rows = self._apply_filters(region=region, variable=variable)
if not rows.any():
logger.info(f"Variable '{variable}' does not exist in region '{region}'!")
return
df_region, df_subregions = _group_and_agg(self._data[rows], "region").align(
df_subregions
)
# use `np.isclose` for checking match
rows = ~np.isclose(df_region, df_subregions, **kwargs)
# if region and subregions don't match, return inconsistent data
if sum(rows):
msg = "`{}` - {} of {} rows are not aggregates of subregions"
logger.info(msg.format(variable, sum(rows), len(df_region)))
if exclude_on_fail:
self._exclude_on_fail(_meta_idx(df_region[rows].reset_index()))
_df = pd.concat(
[
pd.concat(
[df_region[rows], df_subregions[rows]],
axis=1,
keys=(["region", "subregions"]),
)
],
keys=[region],
names=["region"],
)
_df.index = _df.index.reorder_levels(self.dimensions)
return _df
def aggregate_time(
self,
variable,
column="subannual",
value="year",
components=None,
method="sum",
append=False,
):
"""Aggregate timeseries data by subannual time resolution
Parameters
----------
variable : str or list of str
variable(s) to be aggregated
column : str, optional
the data column to be used as subannual time representation
value : str, optional
the name of the aggregated (subannual) time
components : list of str
subannual timeslices to be aggregated; defaults to all subannual
timeslices other than `value`
method : func or str, optional
method to use for aggregation,
e.g. :func:`numpy.mean`, :func:`numpy.sum`, 'min', 'max'
append : bool, optional
append the aggregate timeseries to `self` and return None,
else return aggregate timeseries as new :class:`IamDataFrame`
"""
_df = _aggregate_time(
self,
variable,
column=column,
value=value,
components=components,
method=method,
)
# append to `self` or return as `IamDataFrame`
return self._finalize(_df, append=append)
def downscale_region(
self,
variable,
region="World",
subregions=None,
proxy=None,
weight=None,
append=False,
):
"""Downscale timeseries data to a number of subregions
Parameters
----------
variable : str or list of str
variable(s) to be downscaled
region : str, optional
region from which data will be downscaled
subregions : list of str, optional
list of subregions, defaults to all regions other than `region`
(if using `proxy`) or `region` index (if using `weight`)
proxy : str, optional
variable (within the :class:`IamDataFrame`) to be used as proxy
for regional downscaling
        weight : :class:`pandas.DataFrame`, optional
dataframe with time dimension as columns (year or
:class:`datetime.datetime`) and regions[, model, scenario] as index
append : bool, optional
append the downscaled timeseries to `self` and return None,
else return downscaled data as new IamDataFrame
"""
if proxy is not None and weight is not None:
raise ValueError("Using both 'proxy' and 'weight' arguments is not valid!")
elif proxy is not None:
# get default subregions if not specified and select data from self
subregions = subregions or self._all_other_regions(region)
rows = self._apply_filters(variable=proxy, region=subregions)
cols = self._get_cols(["region", self.time_col])
_proxy = self.data[rows].set_index(cols).value
elif weight is not None:
# downselect weight to subregions or remove `region` from index
if subregions is not None:
rows = weight.index.isin(subregions, level="region")
else:
rows = ~weight.index.isin([region], level="region")
_proxy = weight[rows].stack()
else:
raise ValueError("Either a 'proxy' or 'weight' argument is required!")
_value = (
self.data[self._apply_filters(variable=variable, region=region)]
.set_index(self._get_cols(["variable", "unit", self.time_col]))
.value
)
# compute downscaled data
_total = _proxy.groupby(self.time_col).sum()
_data = _value * _proxy / _total
# append to `self` or return as `IamDataFrame`
return self._finalize(_data, append=append)
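    # Usage sketch (editor's illustration; variable names are assumptions):
    #
    #     df.downscale_region("Emissions|CO2", proxy="Population", append=True)
    #     # subregional values are the regional total scaled by each subregion's
    #     # share of the proxy variable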
def _all_other_regions(self, region, variable=None):
"""Return list of regions other than `region` containing `variable`"""
rows = self._apply_filters(variable=variable)
return self._data[rows].index.get_level_values("region").difference([region])
def _variable_components(self, variable, level=0):
"""Get all components (sub-categories) of a variable for a given level
If `level=0`, for `variable='foo'`, return `['foo|bar']`, but don't
include `'foo|bar|baz'`, which is a sub-sub-category. If `level=None`,
all variables below `variable` in the hierarchy are returned."""
var_list = pd.Series(self.variable)
return var_list[pattern_match(var_list, "{}|*".format(variable), level=level)]
def _get_cols(self, cols):
"""Return a list of columns of `self.data`"""
return META_IDX + cols + self.extra_cols
def check_internal_consistency(self, components=False, **kwargs):
"""Check whether a scenario ensemble is internally consistent
We check that all variables are equal to the sum of their sectoral
components and that all the regions add up to the World total. If
the check is passed, None is returned, otherwise a DataFrame of
inconsistent variables is returned.
Note: at the moment, this method's regional checking is limited to
checking that all the regions sum to the World region. We cannot
make this more automatic unless we store how the regions relate,
see `this issue <https://github.com/IAMconsortium/pyam/issues/106>`_.
Parameters
----------
kwargs : arguments for comparison of values
passed to :func:`numpy.isclose`
components : bool, optional
passed to :meth:`check_aggregate_region` if `True`, use all
sub-categories of each `variable` included in `World` but not in
any of the subregions; if `False`, only aggregate variables over
subregions
"""
lst = []
for variable in self.variable:
diff_agg = self.check_aggregate(variable, **kwargs)
if diff_agg is not None:
lst.append(diff_agg)
diff_regional = self.check_aggregate_region(
variable, components=components, **kwargs
)
if diff_regional is not None:
lst.append(diff_regional)
if len(lst):
_df = pd.concat(lst, sort=True).sort_index()
return _df[
[
c
for c in ["variable", "components", "region", "subregions"]
if c in _df.columns
]
]
def _exclude_on_fail(self, df):
"""Assign a selection of scenarios as `exclude: True` in meta"""
idx = (
df
if isinstance(df, pd.MultiIndex)
else _make_index(df, cols=self.index.names)
)
self.meta.loc[idx, "exclude"] = True
logger.info(
"{} non-valid scenario{} will be excluded".format(
len(idx), "" if len(idx) == 1 else "s"
)
)
def slice(self, keep=True, **kwargs):
"""Return a (filtered) slice object of the IamDataFrame timeseries data index
Parameters
----------
keep : bool, optional
Keep all scenarios satisfying the filters (if *True*) or the inverse.
**kwargs
Arguments for filtering. See the "Notes".
Returns
-------
:class:`IamSlice`
Notes
-----
The following arguments are available for filtering:
- 'meta' columns: filter by string value of that column
- 'model', 'scenario', 'region', 'variable', 'unit':
string or list of strings, where `*` can be used as a wildcard
- 'level': the "depth" of entries in the variable column (number of '|')
(excluding the strings given in the 'variable' argument)
- 'year': takes an integer (int/np.int64), a list of integers or
a range. Note that the last year of a range is not included,
so `range(2010, 2015)` is interpreted as `[2010, ..., 2014]`
- 'time_domain': can be "year" or "datetime"
- arguments for filtering by `datetime.datetime` or np.datetime64
('month', 'hour', 'time')
- 'regexp=True' disables pseudo-regexp syntax in `pattern_match()`
"""
if not isinstance(keep, bool):
raise ValueError(f"Value of `keep` must be a boolean, found: {keep}")
_keep = self._apply_filters(**kwargs)
_keep = _keep if keep else ~_keep
return (
IamSlice(_keep)
if isinstance(_keep, pd.Series)
else IamSlice(_keep, self._data.index)
)
def filter(self, keep=True, inplace=False, **kwargs):
"""Return a (copy of a) filtered (downselected) IamDataFrame
Parameters
----------
keep : bool, optional
Keep all scenarios satisfying the filters (if *True*) or the inverse.
inplace : bool, optional
If *True*, do operation inplace and return *None*.
**kwargs
Passed to :meth:`slice`.
"""
# downselect `data` rows and clean up index
ret = self.copy() if not inplace else self
ret._data = ret._data[self.slice(keep=keep, **kwargs)]
ret._data.index = ret._data.index.remove_unused_levels()
# swap time for year if downselected to years-only
if ret.time_col == "time":
time_values = get_index_levels(ret._data, "time")
if time_values and all([pd.api.types.is_integer(y) for y in time_values]):
ret.swap_time_for_year(inplace=True)
msg = "Only yearly data after filtering, time-domain changed to 'year'."
logger.info(msg)
# downselect `meta` dataframe
idx = _make_index(ret._data, cols=self.index.names)
if len(idx) == 0:
logger.warning("Filtered IamDataFrame is empty!")
ret.meta = ret.meta.loc[idx]
ret.meta.index = ret.meta.index.remove_unused_levels()
ret._set_attributes()
if not inplace:
return ret
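    # Usage sketch (editor's illustration; names and years are assumptions):
    #
    #     df.filter(model="model_a", scenario="scen_*")   # pseudo-regexp wildcards
    #     df.filter(variable="Emissions|*", level=1)      # one hierarchy level below the match
    #     df.filter(year=range(2010, 2051))               # years 2010 through 2050
    #     df.filter(region="World", keep=False)           # inverse selection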
def _apply_filters(self, level=None, **filters):
"""Determine rows to keep in data for given set of filters
Parameters
----------
filters : dict
dictionary of filters of the format (`{col: values}`);
uses a pseudo-regexp syntax by default,
but accepts `regexp: True` in the dictionary to use regexp directly
"""
regexp = filters.pop("regexp", False)
keep = np.ones(len(self), dtype=bool)
# filter by columns and list of values
for col, values in filters.items():
# treat `_apply_filters(col=None)` as no filter applied
if values is None:
continue
if col in self.meta.columns:
matches = pattern_match(
self.meta[col], values, regexp=regexp, has_nan=True
)
cat_idx = self.meta[matches].index
keep_col = _make_index(
self._data, cols=self.index.names, unique=False
).isin(cat_idx)
elif col == "time_domain":
# fast-pass if `self` already has selected time-domain
if self.time_domain == values:
keep_col = np.ones(len(self), dtype=bool)
else:
levels, codes = get_index_levels_codes(self._data, self.time_col)
keep_col = filter_by_time_domain(values, levels, codes)
elif col == "year":
levels, codes = get_index_levels_codes(self._data, self.time_col)
keep_col = filter_by_year(self.time_col, values, levels, codes)
elif col in ["month", "hour", "day"]:
if self.time_col != "time":
logger.error(f"Filter by `{col}` not supported with yearly data.")
return np.zeros(len(self), dtype=bool)
keep_col = filter_by_dt_arg(col, values, self.get_data_column("time"))
elif col == "time":
if self.time_col != "time":
logger.error(f"Filter by `{col}` not supported with yearly data.")
return np.zeros(len(self), dtype=bool)
keep_col = datetime_match(self.get_data_column("time"), values)
elif col in self.dimensions:
levels, codes = get_index_levels_codes(self._data, col)
matches = pattern_match(
levels,
values,
regexp=regexp,
level=level if col == "variable" else None,
has_nan=True,
return_codes=True,
)
keep_col = get_keep_col(codes, matches)
else:
raise ValueError(f"Filter by `{col}` not supported!")
keep = np.logical_and(keep, keep_col)
if level is not None and "variable" not in filters:
col = "variable"
lvl_index, lvl_codes = get_index_levels_codes(self._data, col)
matches = find_depth(lvl_index, level=level)
keep_col = get_keep_col(lvl_codes, matches)
keep = np.logical_and(keep, keep_col)
return keep
def col_apply(self, col, func, *args, **kwargs):
"""Apply a function to a column of data or meta
Parameters
----------
col: str
column in either data or meta dataframes
func: function
function to apply
"""
if col in self.data:
self.data[col] = self.data[col].apply(func, *args, **kwargs)
else:
self.meta[col] = self.meta[col].apply(func, *args, **kwargs)
def add(
self, a, b, name, axis="variable", fillna=None, ignore_units=False, append=False
):
"""Add timeseries data items `a` and `b` along an `axis`
This function computes `a + b`. If `a` or `b` are lists, the method applies
:meth:`pandas.groupby().sum() <pandas.core.groupby.GroupBy.sum>` on each group.
If either `a` or `b` are not defined for a row and `fillna` is not specified,
no value is computed for that row.
Parameters
----------
a, b : str, list of str or a number
Items to be used for the addition.
name : str
Name of the computed timeseries data on the `axis`.
axis : str, optional
Axis along which to compute.
fillna : dict or scalar, optional
Value to fill holes when rows are not defined for either `a` or `b`.
Can be a scalar or a dictionary of the form :code:`{arg: default}`.
ignore_units : bool or str, optional
Perform operation on values without considering units. Set units of returned
data to `unknown` (if True) or the value of `ignore_units` (if str).
append : bool, optional
Whether to append aggregated timeseries data to this instance.
Returns
-------
:class:`IamDataFrame` or **None**
Computed timeseries data or None if `append=True`.
See Also
--------
subtract, multiply, divide
apply : Apply a custom function on the timeseries data along any axis.
aggregate : Aggregate timeseries data along the `variable` hierarchy.
aggregate_region : Aggregate timeseries data along the `region` dimension.
Notes
-----
This function uses the :mod:`pint` package and the :mod:`iam-units` registry
(`read the docs <https://github.com/IAMconsortium/units>`_) to handle units.
:mod:`pyam` will keep notation consistent with the input format (if possible)
and otherwise uses abbreviated units :code:`'{:~}'.format(u)` (see
`here <https://pint.readthedocs.io/en/stable/tutorial.html#string-formatting>`_
for more information).
As a result, the notation of returned units may differ from the input format.
For example, the unit :code:`EJ/yr` may be reformatted to :code:`EJ / a`.
"""
kwds = dict(axis=axis, fillna=fillna, ignore_units=ignore_units)
_value = _op_data(self, name, "add", **kwds, a=a, b=b)
# append to `self` or return as `IamDataFrame`
return self._finalize(_value, append=append)
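    # Usage sketch (editor's illustration; variable names are assumptions):
    #
    #     df.add("Emissions|CO2|Energy", "Emissions|CO2|AFOLU",
    #            "Emissions|CO2", append=True)
    #     # computes the sum of the two components and appends it as 'Emissions|CO2'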
def subtract(
self, a, b, name, axis="variable", fillna=None, ignore_units=False, append=False
):
"""Compute the difference of timeseries data items `a` and `b` along an `axis`
This function computes `a - b`. If `a` or `b` are lists, the method applies
:meth:`pandas.groupby().sum() <pandas.core.groupby.GroupBy.sum>` on each group.
If either `a` or `b` are not defined for a row and `fillna` is not specified,
no value is computed for that row.
Parameters
----------
a, b : str, list of str or a number
Items to be used for the subtraction.
name : str
Name of the computed timeseries data on the `axis`.
axis : str, optional
Axis along which to compute.
fillna : dict or scalar, optional
Value to fill holes when rows are not defined for either `a` or `b`.
Can be a scalar or a dictionary of the form :code:`{arg: default}`.
ignore_units : bool or str, optional
Perform operation on values without considering units. Set units of returned
data to `unknown` (if True) or the value of `ignore_units` (if str).
append : bool, optional
Whether to append aggregated timeseries data to this instance.
Returns
-------
:class:`IamDataFrame` or **None**
Computed timeseries data or None if `append=True`.
See Also
--------
add, multiply, divide
diff : Compute the difference of timeseries data along the time dimension.
apply : Apply a custom function on the timeseries data along any axis.
Notes
-----
This function uses the :mod:`pint` package and the :mod:`iam-units` registry
(`read the docs <https://github.com/IAMconsortium/units>`_) to handle units.
:mod:`pyam` will keep notation consistent with the input format (if possible)
and otherwise uses abbreviated units :code:`'{:~}'.format(u)` (see
`here <https://pint.readthedocs.io/en/stable/tutorial.html#string-formatting>`_
for more information).
As a result, the notation of returned units may differ from the input format.
For example, the unit :code:`EJ/yr` may be reformatted to :code:`EJ / a`.
"""
kwds = dict(axis=axis, fillna=fillna, ignore_units=ignore_units)
_value = _op_data(self, name, "subtract", **kwds, a=a, b=b)
# append to `self` or return as `IamDataFrame`
return self._finalize(_value, append=append)
def multiply(
self, a, b, name, axis="variable", fillna=None, ignore_units=False, append=False
):
"""Multiply timeseries data items `a` and `b` along an `axis`
This function computes `a * b`. If `a` or `b` are lists, the method applies
:meth:`pandas.groupby().sum() <pandas.core.groupby.GroupBy.sum>` on each group.
If either `a` or `b` are not defined for a row and `fillna` is not specified,
no value is computed for that row.
Parameters
----------
a, b : str, list of str or a number
            Items to be used for the multiplication.
name : str
Name of the computed timeseries data on the `axis`.
axis : str, optional
Axis along which to compute.
fillna : dict or scalar, optional
Value to fill holes when rows are not defined for either `a` or `b`.
Can be a scalar or a dictionary of the form :code:`{arg: default}`.
ignore_units : bool or str, optional
Perform operation on values without considering units. Set units of returned
data to `unknown` (if True) or the value of `ignore_units` (if str).
append : bool, optional
Whether to append aggregated timeseries data to this instance.
Returns
-------
:class:`IamDataFrame` or **None**
Computed timeseries data or None if `append=True`.
See Also
--------
add, subtract, divide
apply : Apply a custom function on the timeseries data along any axis.
Notes
-----
This function uses the :mod:`pint` package and the :mod:`iam-units` registry
(`read the docs <https://github.com/IAMconsortium/units>`_) to handle units.
:mod:`pyam` will keep notation consistent with the input format (if possible)
and otherwise uses abbreviated units :code:`'{:~}'.format(u)` (see
`here <https://pint.readthedocs.io/en/stable/tutorial.html#string-formatting>`_
for more information).
As a result, the notation of returned units may differ from the input format.
For example, the unit :code:`EJ/yr` may be reformatted to :code:`EJ / a`.
"""
kwds = dict(axis=axis, fillna=fillna, ignore_units=ignore_units)
_value = _op_data(self, name, "multiply", **kwds, a=a, b=b)
# append to `self` or return as `IamDataFrame`
return self._finalize(_value, append=append)
def divide(
self, a, b, name, axis="variable", fillna=None, ignore_units=False, append=False
):
"""Divide the timeseries data items `a` and `b` along an `axis`
This function computes `a / b`. If `a` or `b` are lists, the method applies
:meth:`pandas.groupby().sum() <pandas.core.groupby.GroupBy.sum>` on each group.
If either `a` or `b` are not defined for a row and `fillna` is not specified,
no value is computed for that row.
Parameters
----------
a, b : str, list of str or a number
Items to be used for the division.
name : str
Name of the computed timeseries data on the `axis`.
axis : str, optional
Axis along which to compute.
fillna : dict or scalar, optional
Value to fill holes when rows are not defined for either `a` or `b`.
Can be a scalar or a dictionary of the form :code:`{arg: default}`.
ignore_units : bool or str, optional
Perform operation on values without considering units. Set units of returned
data to `unknown` (if True) or the value of `ignore_units` (if str).
append : bool, optional
Whether to append aggregated timeseries data to this instance.
Returns
-------
:class:`IamDataFrame` or **None**
Computed timeseries data or None if `append=True`.
See Also
--------
add, subtract, multiply
apply : Apply a custom function on the timeseries data along any axis.
Notes
-----
This function uses the :mod:`pint` package and the :mod:`iam-units` registry
(`read the docs <https://github.com/IAMconsortium/units>`_) to handle units.
:mod:`pyam` will keep notation consistent with the input format (if possible)
and otherwise uses abbreviated units :code:`'{:~}'.format(u)` (see
`here <https://pint.readthedocs.io/en/stable/tutorial.html#string-formatting>`_
for more information).
As a result, the notation of returned units may differ from the input format.
For example, the unit :code:`EJ/yr` may be reformatted to :code:`EJ / a`.
"""
kwds = dict(axis=axis, fillna=fillna, ignore_units=ignore_units)
_value = _op_data(self, name, "divide", **kwds, a=a, b=b)
# append to `self` or return as `IamDataFrame`
return self._finalize(_value, append=append)
def apply(
self, func, name, axis="variable", fillna=None, append=False, args=(), **kwds
):
"""Apply a function to components of timeseries data along an `axis`
This function computes a function `func` using timeseries data selected
along an `axis` downselected by keyword arguments.
The length of components needs to match the number of required arguments
of `func`.
Parameters
----------
func : function
Function to apply to `components` along `axis`.
name : str
Name of the computed timeseries data on the `axis`.
axis : str, optional
Axis along which to compute.
fillna : dict or scalar, optional
Value to fill holes when rows are not defined for items in `args` or `kwds`.
Can be a scalar or a dictionary of the form :code:`{kwd: default}`.
append : bool, optional
Whether to append aggregated timeseries data to this instance.
args : tuple or list of str
List of variables to pass as positional arguments to `func`.
**kwds
Additional keyword arguments to pass as keyword arguments to `func`. If the
name of a variable is given, the associated timeseries is passed. Otherwise
the value itself is passed.
Returns
-------
:class:`IamDataFrame` or **None**
Computed timeseries data or None if `append=True`.
See Also
--------
add, subtract, multiply, divide, diff
Notes
-----
This function uses the :mod:`pint` package and the :mod:`iam-units` registry
(`read the docs <https://github.com/IAMconsortium/units>`_) to handle units.
:mod:`pyam` uses abbreviated units :code:`'{:~}'.format(u)` (see
`here <https://pint.readthedocs.io/en/stable/tutorial.html#string-formatting>`_
for more information).
As a result, the notation of returned units may differ from the input format.
For example, the unit :code:`EJ/yr` may be reformatted to :code:`EJ / a`.
"""
_value = _op_data(self, name, func, axis=axis, fillna=fillna, args=args, **kwds)
# append to `self` or return as `IamDataFrame`
return self._finalize(_value, append=append)
def diff(self, mapping, periods=1, append=False):
"""Compute the difference of timeseries data along the time dimension
This methods behaves as if applying :meth:`pandas.DataFrame.diff` on the
timeseries data in wide format.
By default, the diff-value in period *t* is computed as *x[t] - x[t-1]*.
Parameters
----------
mapping : dict
Mapping of *variable* item(s) to the name(s) of the diff-ed timeseries data,
e.g.,
.. code-block:: python
{"current variable": "name of diff-ed variable", ...}
periods : int, optional
Periods to shift for calculating difference, accepts negative values;
passed to :meth:`pandas.DataFrame.diff`.
append : bool, optional
Whether to append computed timeseries data to this instance.
Returns
-------
:class:`IamDataFrame` or **None**
Computed timeseries data or None if `append=True`.
See Also
--------
subtract, apply, interpolate
Notes
-----
This method behaves as if applying :meth:`pandas.DataFrame.diff` by row in a
wide data format, so the difference is computed on the previous existing value.
This can lead to unexpected results if the data has inconsistent period lengths.
Use the following to ensure that no missing values exist prior to computing
the difference:
.. code-block:: python
df.interpolate(time=df.year)
"""
cols = [d for d in self.dimensions if d != self.time_col]
_value = self.filter(variable=mapping)._data.groupby(cols).diff(periods=periods)
_value.index = replace_index_values(_value.index, "variable", mapping)
# append to `self` or return as `IamDataFrame`
return self._finalize(_value, append=append)
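    # Usage sketch (editor's illustration; names are assumptions):
    #
    #     df.diff({"Emissions|CO2": "Change of Emissions|CO2"}, append=True)
    #     # the diff-ed value in period t is x[t] - x[t-1] of the original variable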
def _to_file_format(self, iamc_index):
"""Return a dataframe suitable for writing to a file"""
df = self.timeseries(iamc_index=iamc_index).reset_index()
df = df.rename(columns={c: str(c).title() for c in df.columns})
return df
def to_csv(self, path, iamc_index=False, **kwargs):
"""Write timeseries data of this object to a csv file
Parameters
----------
path : str or path object
file path or :class:`pathlib.Path`
iamc_index : bool, default False
if True, use `['model', 'scenario', 'region', 'variable', 'unit']`;
else, use all 'data' columns
"""
self._to_file_format(iamc_index).to_csv(path, index=False, **kwargs)
def to_excel(
self,
excel_writer,
sheet_name="data",
iamc_index=False,
include_meta=True,
**kwargs,
):
"""Write object to an Excel spreadsheet
Parameters
----------
excel_writer : str, path object or ExcelWriter object
any valid string path, :class:`pathlib.Path`
or :class:`pandas.ExcelWriter`
sheet_name : string
name of sheet which will contain :meth:`timeseries` data
iamc_index : bool, default False
if True, use `['model', 'scenario', 'region', 'variable', 'unit']`;
else, use all 'data' columns
        include_meta : bool or str
            if True, write 'meta' to an Excel sheet named 'meta' (default);
            if this is a string, use it as the sheet name
"""
# open a new ExcelWriter instance (if necessary)
close = False
if not isinstance(excel_writer, pd.ExcelWriter):
close = True
excel_writer = pd.ExcelWriter(excel_writer, engine="openpyxl")
# write data table
write_sheet(excel_writer, sheet_name, self._to_file_format(iamc_index))
# write meta table unless `include_meta=False`
if include_meta:
meta_rename = dict([(i, i.capitalize()) for i in META_IDX])
write_sheet(
excel_writer,
"meta" if include_meta is True else include_meta,
self.meta.reset_index().rename(columns=meta_rename),
)
# close the file if `excel_writer` arg was a file name
if close:
excel_writer.close()
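    # Usage sketch (editor's illustration; the file and sheet names are assumptions):
    #
    #     df.to_excel("scenarios.xlsx")     # writes a 'data' sheet and a 'meta' sheet
    #     with pd.ExcelWriter("scenarios.xlsx", engine="openpyxl") as writer:
    #         df.to_excel(writer, sheet_name="data", include_meta="indicators")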
def export_meta(self, excel_writer, sheet_name="meta"):
"""Write the 'meta' indicators of this object to an Excel sheet
Parameters
----------
excel_writer : str, path object or ExcelWriter object
any valid string path, :class:`pathlib.Path`
or :class:`pandas.ExcelWriter`
sheet_name : str
name of sheet which will contain dataframe of 'meta' indicators
"""
close = False
if not isinstance(excel_writer, pd.ExcelWriter):
excel_writer, close = pd.ExcelWriter(excel_writer), True
write_sheet(excel_writer, sheet_name, self.meta, index=True)
if close:
excel_writer.close()
def to_datapackage(self, path):
"""Write object to a frictionless Data Package
More information: https://frictionlessdata.io
Returns the saved :class:`datapackage.Package`
(|datapackage.Package.docs|).
When adding metadata (descriptors), please follow the `template`
defined by https://github.com/OpenEnergyPlatform/metadata
Parameters
----------
path : string or path object
any valid string path or :class:`pathlib.Path`
"""
if not HAS_DATAPACKAGE:
raise ImportError("Required package `datapackage` not found!")
with TemporaryDirectory(dir=".") as tmp:
# save data and meta tables to a temporary folder
self.data.to_csv(Path(tmp) / "data.csv", index=False)
self.meta.to_csv(Path(tmp) / "meta.csv")
# cast tables to datapackage
package = Package()
package.infer("{}/*.csv".format(tmp))
if not package.valid:
logger.warning("The exported datapackage is not valid")
package.save(path)
        # return the package (needs to be reloaded because `tmp` was deleted)
return Package(path)
def load_meta(
self, path, sheet_name="meta", ignore_conflict=False, *args, **kwargs
):
"""Load 'meta' indicators from file
Parameters
----------
path : str, :class:`pathlib.Path` or :class:`pandas.ExcelFile`
A valid path or instance of an xlsx or csv file
sheet_name : str, optional
Name of the sheet to be parsed (if xlsx)
ignore_conflict : bool, optional
If `True`, values in `path` take precedence over existing `meta`.
If `False`, raise an error in case of conflicts.
kwargs
Passed to :func:`pandas.read_excel` or :func:`pandas.read_csv`
"""
# load from file
path = path if isinstance(path, pd.ExcelFile) else Path(path)
df = read_pandas(path, sheet_name=sheet_name, **kwargs)
# cast model-scenario column headers to lower-case (if necessary)
df = df.rename(columns=dict([(i.capitalize(), i) for i in META_IDX]))
# check that required index columns exist
missing_cols = [c for c in self.index.names if c not in df.columns]
if missing_cols:
raise ValueError(
f"File {Path(path)} (sheet {sheet_name}) "
f"missing required index columns {missing_cols}!"
)
# set index, filter to relevant scenarios from imported file
n = len(df)
df.set_index(self.index.names, inplace=True)
df = df.loc[self.meta.index.intersection(df.index)]
        # skip import of meta indicators if the file has no rows
if not n:
logger.info(f"No scenarios found in sheet {sheet_name}")
return
msg = "Reading meta indicators"
# indicate if not all scenarios are included in the meta file
if len(df) < len(self.meta):
i = len(self.meta)
msg += f" for {len(df)} out of {i} scenario{s(i)}"
# indicate if more scenarios exist in meta file than in self
invalid = n - len(df)
if invalid:
msg += f", ignoring {invalid} scenario{s(invalid)} from file"
logger.warning(msg)
else:
logger.info(msg)
# merge imported meta indicators
self.meta = merge_meta(df, self.meta, ignore_conflict=ignore_conflict)
def map_regions(
self,
map_col,
agg=None,
copy_col=None,
fname=None,
region_col=None,
remove_duplicates=False,
inplace=False,
):
"""Plot regional data for a single model, scenario, variable, and year
see pyam.plotting.region_plot() for all available options
Parameters
----------
map_col : str
The column used to map new regions to. Common examples include
iso and 5_region.
agg : str, optional
Perform a data aggregation. Options include: sum.
copy_col : str, optional
Copy the existing region data into a new column for later use.
fname : str, optional
Use a non-default region mapping file
region_col : string, optional
Use a non-default column name for regions to map from.
remove_duplicates : bool, optional
If there are duplicates in the mapping from one regional level to
another, then remove these duplicates by counting the most common
mapped value.
This option is most useful when mapping from high resolution
(e.g., model regions) to low resolution (e.g., 5_region).
inplace : bool, optional
if True, do operation inplace and return None
"""
fname = fname or run_control()["region_mapping"]["default"]
mapping = read_pandas(Path(fname)).rename(str.lower, axis="columns")
map_col = map_col.lower()
ret = self.copy() if not inplace else self
_df = ret.data
        columns_ordered = _df.columns
# merge data
dfs = []
for model in self.model:
df = _df[_df["model"] == model]
_col = region_col or "{}.REGION".format(model)
_map = mapping.rename(columns={_col.lower(): "region"})
_map = _map[["region", map_col]].dropna().drop_duplicates()
_map = _map[_map["region"].isin(_df["region"])]
if remove_duplicates and _map["region"].duplicated().any():
# find duplicates
where_dup = _map["region"].duplicated(keep=False)
dups = _map[where_dup]
logger.warning(
"""
Duplicate entries found for the following regions.
Mapping will occur only for the most common instance.
{}""".format(
dups["region"].unique()
)
)
# get non duplicates
_map = _map[~where_dup]
# order duplicates by the count frequency
dups = (
dups.groupby(["region", map_col])
.size()
.reset_index(name="count")
.sort_values(by="count", ascending=False)
.drop("count", axis=1)
)
                # keep only the most frequent occurrence
dups = dups[~dups["region"].duplicated(keep="first")]
# combine them back
_map = pd.concat([_map, dups])
if copy_col is not None:
df[copy_col] = df["region"]
df = (
df.merge(_map, on="region")
.drop("region", axis=1)
.rename(columns={map_col: "region"})
)
dfs.append(df)
df = pd.concat(dfs)
# perform aggregations
if agg == "sum":
df = df.groupby(self.dimensions).sum().reset_index()
df = (
            df.reindex(columns=columns_ordered)
.sort_values(SORT_IDX)
.reset_index(drop=True)
)
ret._data = df.set_index(self.dimensions).value
if not inplace:
return ret
def _meta_idx(data):
"""Return the 'META_IDX' from data by index"""
return data[META_IDX].drop_duplicates().set_index(META_IDX).index
def _check_rows(rows, check, in_range=True, return_test="any"):
"""Check all rows to be in/out of a certain range and provide testing on
return values based on provided conditions
Parameters
----------
rows : pd.DataFrame
data rows
check : dict
dictionary with possible values of 'up', 'lo', and 'year'
in_range : bool, optional
check if values are inside or outside of provided range
return_test : str, optional
possible values:
- 'any': default, return scenarios where check passes for any entry
- 'all': test if all values match checks, if not, return empty set
"""
valid_checks = set(["up", "lo", "year"])
if not set(check.keys()).issubset(valid_checks):
msg = "Unknown checking type: {}"
raise ValueError(msg.format(check.keys() - valid_checks))
if "year" not in check:
where_idx = set(rows.index)
else:
if "time" in rows.index.names:
_years = rows.index.get_level_values("time").year
else:
_years = rows.index.get_level_values("year")
where_idx = set(rows.index[_years == check["year"]])
rows = rows.loc[list(where_idx)]
up_op = rows.values.__le__ if in_range else rows.values.__gt__
lo_op = rows.values.__ge__ if in_range else rows.values.__lt__
check_idx = []
for (bd, op) in [("up", up_op), ("lo", lo_op)]:
if bd in check:
check_idx.append(set(rows.index[op(check[bd])]))
if return_test == "any":
ret = where_idx & set.union(*check_idx)
elif return_test == "all":
ret = where_idx if where_idx == set.intersection(*check_idx) else set()
else:
raise ValueError("Unknown return test: {}".format(return_test))
return ret
def _apply_criteria(df, criteria, **kwargs):
"""Apply criteria individually to every model/scenario instance"""
idxs = []
for var, check in criteria.items():
_df = df[df.index.get_level_values("variable") == var]
for group in _df.groupby(META_IDX):
grp_idxs = _check_rows(group[-1], check, **kwargs)
idxs.append(grp_idxs)
df = df.loc[itertools.chain(*idxs)]
return df
def _make_index(df, cols=META_IDX, unique=True):
"""Create an index from the columns/index of a dataframe or series"""
def _get_col(c):
try:
return df.index.get_level_values(c)
except KeyError:
return df[c]
index = list(zip(*[_get_col(col) for col in cols]))
if unique:
index = pd.unique(index)
return pd.MultiIndex.from_tuples(index, names=tuple(cols))
def _empty_iamframe(index):
"""Return an empty IamDataFrame with the correct index columns"""
return IamDataFrame(pd.DataFrame([], columns=index))
def validate(df, criteria={}, exclude_on_fail=False, **kwargs):
"""Validate scenarios using criteria on timeseries values
Returns all scenarios which do not match the criteria and prints a log
message or returns None if all scenarios match the criteria.
When called with `exclude_on_fail=True`, scenarios in `df` not satisfying
the criteria will be marked as `exclude=True` (object modified in place).
Parameters
----------
df : IamDataFrame
args : passed to :meth:`IamDataFrame.validate`
kwargs : used for downselecting IamDataFrame
passed to :meth:`IamDataFrame.filter`
"""
fdf = df.filter(**kwargs)
if len(fdf.data) > 0:
vdf = fdf.validate(criteria=criteria, exclude_on_fail=exclude_on_fail)
df.meta["exclude"] |= fdf.meta["exclude"] # update if any excluded
return vdf
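# Illustrative usage sketch for `validate`: the variable name and bounds in
# `criteria` are assumptions for this example only.
def _example_validate(df):
    """Return scenarios whose 'Primary Energy' value exceeds 10 in 2030."""
    criteria = {"Primary Energy": {"up": 10, "year": 2030}}
    # `model="m*"` is forwarded to :meth:`IamDataFrame.filter` before validating
    return validate(df, criteria=criteria, exclude_on_fail=False, model="m*")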
def require_variable(
df, variable, unit=None, year=None, exclude_on_fail=False, **kwargs
):
"""Check whether all scenarios have a required variable
Parameters
----------
df : IamDataFrame
args : passed to :meth:`IamDataFrame.require_variable`
kwargs : used for downselecting IamDataFrame
passed to :meth:`IamDataFrame.filter`
"""
fdf = df.filter(**kwargs)
if len(fdf.data) > 0:
vdf = fdf.require_variable(
variable=variable, unit=unit, year=year, exclude_on_fail=exclude_on_fail
)
df.meta["exclude"] |= fdf.meta["exclude"] # update if any excluded
return vdf
def categorize(
df, name, value, criteria, color=None, marker=None, linestyle=None, **kwargs
):
"""Assign scenarios to a category according to specific criteria
or display the category assignment
Parameters
----------
df : IamDataFrame
args : passed to :meth:`IamDataFrame.categorize`
kwargs : used for downselecting IamDataFrame
passed to :meth:`IamDataFrame.filter`
"""
fdf = df.filter(**kwargs)
fdf.categorize(
name=name,
value=value,
criteria=criteria,
color=color,
marker=marker,
linestyle=linestyle,
)
# update meta indicators
if name in df.meta:
df.meta[name].update(fdf.meta[name])
else:
df.meta[name] = fdf.meta[name]
def check_aggregate(
df, variable, components=None, exclude_on_fail=False, multiplier=1, **kwargs
):
"""Check whether the timeseries values match the aggregation
of sub-categories
Parameters
----------
df : IamDataFrame
args : passed to :meth:`IamDataFrame.check_aggregate`
kwargs : used for downselecting IamDataFrame
passed to :meth:`IamDataFrame.filter`
"""
fdf = df.filter(**kwargs)
if len(fdf.data) > 0:
vdf = fdf.check_aggregate(
variable=variable,
components=components,
exclude_on_fail=exclude_on_fail,
multiplier=multiplier,
)
df.meta["exclude"] |= fdf.meta["exclude"] # update if any excluded
return vdf
def filter_by_meta(data, df, join_meta=False, **kwargs):
"""Filter by and join meta columns from an IamDataFrame to a pd.DataFrame
Parameters
----------
data : pandas.DataFrame
:class:`pandas.DataFrame` to which meta columns are to be joined,
index or columns must include `['model', 'scenario']`
df : IamDataFrame
IamDataFrame from which meta columns are filtered and joined (optional)
join_meta : bool, default False
join selected columns from `df.meta` on `data`
kwargs
meta columns to be filtered/joined, where `col=...` applies filters
with the given arguments (using :meth:`utils.pattern_match`).
Using `col=None` joins the column without filtering (setting col
to nan if `(model, scenario)` not in `df.meta.index`)
"""
if not set(META_IDX).issubset(data.index.names + list(data.columns)):
raise ValueError("Missing required index dimensions or columns!")
meta = pd.DataFrame(df.meta[list(set(kwargs) - set(META_IDX))].copy())
# filter meta by columns
keep = np.array([True] * len(meta))
apply_filter = False
for col, values in kwargs.items():
if col in META_IDX and values is not None:
_col = meta.index.get_level_values(0 if col == "model" else 1)
keep &= pattern_match(_col, values, has_nan=False)
apply_filter = True
elif values is not None:
keep &= pattern_match(meta[col], values)
apply_filter |= values is not None
meta = meta[keep]
# set the data index to META_IDX and apply filtered meta index
idx = list(data.index.names) if not data.index.names == [None] else None
data = data.reset_index().set_index(META_IDX)
meta = meta.loc[meta.index.intersection(data.index.drop_duplicates())]
meta.index.names = META_IDX
if apply_filter:
data = data.loc[meta.index]
data.index.names = META_IDX
# join meta (optional), reset index to format as input arg
data = data.join(meta) if join_meta else data
data = data.reset_index().set_index(idx or "index")
if idx is None:
data.index.name = None
return data
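# Illustrative usage sketch for `filter_by_meta`: the meta column name
# 'category' and its value are assumptions for this example only.
def _example_filter_by_meta(data, df):
    """Keep only rows of `data` whose scenario is categorized 'Below 2C'."""
    # `category="Below 2C"` filters on `df.meta`; `join_meta=True` appends the column
    return filter_by_meta(data, df, join_meta=True, category="Below 2C")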
def compare(
left, right, left_label="left", right_label="right", drop_close=True, **kwargs
):
"""Compare the data in two IamDataFrames and return a pandas.DataFrame
Parameters
----------
left, right : IamDataFrames
two :class:`IamDataFrame` instances to be compared
left_label, right_label : str, default `left`, `right`
column names of the returned :class:`pandas.DataFrame`
drop_close : bool, optional
remove all data where `left` and `right` are close
kwargs : arguments for comparison of values
passed to :func:`numpy.isclose`
"""
    return _compare(left, right, left_label, right_label, drop_close=drop_close, **kwargs)
def concat(objs, ignore_meta_conflict=False, **kwargs):
"""Concatenate a series of IamDataFrame-like objects
Parameters
----------
objs : iterable of IamDataFrames
A list of objects castable to :class:`IamDataFrame`
ignore_meta_conflict : bool, optional
        If False, raise an error if any meta columns present in `objs` are not identical.
        If True, values in earlier elements of `objs` take precedence.
kwargs
Passed to :class:`IamDataFrame(other, **kwargs) <IamDataFrame>`
        for any item of `objs` which isn't already an IamDataFrame.
Returns
-------
IamDataFrame
Raises
------
TypeError
        If `objs` is not iterable.
ValueError
        If the time domain or other timeseries data index dimensions don't match.
Notes
-----
The *meta* attributes are merged only for those objects of *objs* that are passed
as :class:`IamDataFrame` instances.
    The :attr:`dimensions` and :attr:`index` names of all elements of *objs* must be
identical. The returned IamDataFrame inherits the dimensions and index names.
"""
if not islistable(objs) or isinstance(objs, pd.DataFrame):
raise TypeError(f"'{objs.__class__.__name__}' object is not iterable")
objs = list(objs)
if len(objs) < 1:
raise ValueError("No objects to concatenate")
def as_iamdataframe(df):
if isinstance(df, IamDataFrame):
return df, True
else:
return IamDataFrame(df, **kwargs), False
# cast first item to IamDataFrame (if necessary)
df, _merge_meta = as_iamdataframe(objs[0])
extra_cols, time_col = df.extra_cols, df.time_col
consistent_time_domain = True
iam_dfs = [(df, _merge_meta)]
# cast all items to IamDataFrame (if necessary) and check consistency of items
for df in objs[1:]:
df, _merge_meta = as_iamdataframe(df)
if df.extra_cols != extra_cols:
raise ValueError("Items have incompatible timeseries data dimensions")
if df.time_col != time_col:
consistent_time_domain = False
iam_dfs.append((df, _merge_meta))
# cast all instances to "time"
if not consistent_time_domain:
_iam_dfs = []
for (df, _merge_meta) in iam_dfs:
if df.time_col == "year":
df = df.swap_year_for_time()
_iam_dfs.append((df, _merge_meta))
iam_dfs = _iam_dfs # replace list of IamDataFrames with consistent list
# extract timeseries data and meta attributes
ret_data, ret_meta = [], None
for (df, _merge_meta) in iam_dfs:
ret_data.append(df._data)
if _merge_meta:
ret_meta = (
df.meta
if ret_meta is None
else merge_meta(ret_meta, df.meta, ignore_meta_conflict)
)
# return as new IamDataFrame, this will verify integrity as part of `__init__()`
return IamDataFrame(
pd.concat(ret_data, verify_integrity=False),
meta=ret_meta,
index=ret_meta.index.names,
)
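# Illustrative usage sketch for `concat`: `other_data` stands in for any
# IAMC-format object (e.g. a wide pandas.DataFrame) that the IamDataFrame
# constructor can cast via the pass-through `kwargs`.
def _example_concat(df, other_data):
    """Combine an IamDataFrame with additional IAMC-format data."""
    return concat([df, other_data], ignore_meta_conflict=True)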
def read_datapackage(path, data="data", meta="meta"):
"""Read timeseries data and meta-indicators from frictionless Data Package
Parameters
----------
path : string or path object
any valid string path or :class:`pathlib.Path`, |br|
passed to :class:`datapackage.Package` (|datapackage.Package.docs|)
data : str, optional
resource containing timeseries data in IAMC-compatible format
meta : str, optional
        resource containing a table of categorization and
quantitative indicators
"""
if not HAS_DATAPACKAGE: # pragma: no cover
raise ImportError("Required package `datapackage` not found!")
package = Package(path)
def _get_column_names(x):
return [i["name"] for i in x.descriptor["schema"]["fields"]]
# read `data` table
resource_data = package.get_resource(data)
_data = pd.DataFrame(resource_data.read())
_data.columns = _get_column_names(resource_data)
df = IamDataFrame(_data)
# read `meta` table
if meta in package.resource_names:
resource_meta = package.get_resource(meta)
_meta = pd.DataFrame(resource_meta.read())
_meta.columns = _get_column_names(resource_meta)
df.meta = _meta.set_index(META_IDX)
return df
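# Illustrative usage sketch for `read_datapackage`: the file name below is a
# placeholder, not a real data package shipped with this module.
def _example_read_datapackage():
    """Load timeseries data and meta indicators from a local data package."""
    return read_datapackage("scenario_data.zip", data="data", meta="meta")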
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Site error log module.
"""
import logging
import os
import sys
import time
from random import random
from AccessControl.class_init import InitializeClass
from AccessControl.SecurityInfo import ClassSecurityInfo
from AccessControl.SecurityManagement import getSecurityManager
from AccessControl.unauthorized import Unauthorized
from Acquisition import aq_acquire
from Acquisition import aq_base
from App.Dialogs import MessageDialog
from OFS.SimpleItem import SimpleItem
from Products.PageTemplates.PageTemplateFile import PageTemplateFile
from transaction.interfaces import TransientError
from zExceptions.ExceptionFormatter import format_exception
from zope.component import adapter
from zope.event import notify
from ZPublisher.interfaces import IPubFailure
from .interfaces import ErrorRaisedEvent
try:
# Python 3
from _thread import allocate_lock
except ImportError:
# Python 2
from thread import allocate_lock
LOG = logging.getLogger('Zope.SiteErrorLog')
# Permission names
use_error_logging = 'Log Site Errors'
log_to_event_log = 'Log to the Event Log'
# We want to restrict the rate at which errors are sent to the Event Log
# because we know that these errors can be generated quickly enough to
# flood some zLOG backends. zLOG is used to notify someone of a problem,
# not to record every instance.
# This dictionary maps exception name to a value which encodes when we
# can next send the error with that name into the event log. This dictionary
# is shared between threads and instances. Concurrent access will not
# do much harm.
_rate_restrict_pool = {}
# The number of seconds that must elapse on average between sending two
# exceptions of the same name into the Event Log: one per minute.
_rate_restrict_period = 60
# The number of exceptions to allow in a burst before the above limit
# kicks in. We allow five exceptions, before limiting them to one per
# minute.
_rate_restrict_burst = 5
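# Worked example of the rate-restriction bookkeeping used by
# `SiteErrorLog._do_copy_to_zlog` below: an exception type may be copied to the
# event log only once `now` has passed its stored deadline, and every copy
# pushes the deadline one period further, starting from at most `burst`
# periods in the past. This helper is purely illustrative.
def _example_rate_restrict(now, when,
                           period=_rate_restrict_period,
                           burst=_rate_restrict_burst):
    """Return (allowed, next_when) for a single exception type."""
    if now <= when:
        return False, when
    next_when = max(when, now - burst * period) + period
    return True, next_when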
_www = os.path.join(os.path.dirname(__file__), 'www')
# temp_logs holds the logs.
temp_logs = {} # { oid -> [ traceback string ] }
cleanup_lock = allocate_lock()
try:
# Python 2
bstr = basestring
except NameError:
# Python 3
bstr = str
class SiteErrorLog(SimpleItem):
"""Site error log class. You can put an error log anywhere in the tree
and exceptions in that area will be posted to the site error log.
"""
meta_type = 'Site Error Log'
id = 'error_log'
zmi_icon = 'fas fa-bug'
zmi_show_add_dialog = False
keep_entries = 20
copy_to_zlog = True
security = ClassSecurityInfo()
manage_options = (
{'label': 'Log', 'action': 'manage_main'},
) + SimpleItem.manage_options
security.declareProtected(use_error_logging, 'manage_main') # NOQA: D001
manage_main = PageTemplateFile('main.pt', _www)
security.declareProtected(use_error_logging, 'showEntry') # NOQA: D001
showEntry = PageTemplateFile('showEntry.pt', _www)
@security.private
def manage_beforeDelete(self, item, container):
if item is self:
try:
del container.__error_log__
except AttributeError:
pass
@security.private
def manage_afterAdd(self, item, container):
if item is self:
container.__error_log__ = aq_base(self)
def _setId(self, id):
if id != self.id:
raise ValueError(MessageDialog(
title='Invalid Id',
message='Cannot change the id of a SiteErrorLog',
action='./manage_main'))
def _getLog(self):
"""Returns the log for this object.
Careful, the log is shared between threads.
"""
log = temp_logs.get(self._p_oid, None)
if log is None:
log = []
temp_logs[self._p_oid] = log
return log
@security.protected(use_error_logging)
def forgetEntry(self, id, REQUEST=None):
"""Removes an entry from the error log."""
log = self._getLog()
cleanup_lock.acquire()
i = 0
for entry in log:
if entry['id'] == id:
del log[i]
i += 1
cleanup_lock.release()
if REQUEST is not None:
REQUEST.RESPONSE.redirect(
'%s/manage_main?manage_tabs_message='
'Error+log+entry+was+removed.' %
self.absolute_url())
    # Exceptions that happen all the time, so we don't need
# to log them. Eventually this should be configured
# through-the-web.
_ignored_exceptions = ('Unauthorized', 'NotFound', 'Redirect')
@security.private
def raising(self, info):
"""Log an exception.
Called by SimpleItem's exception handler.
Returns the url to view the error log entry
"""
now = time.time()
try:
tb_text = None
tb_html = None
strtype = str(getattr(info[0], '__name__', info[0]))
if strtype in self._ignored_exceptions:
return
if not isinstance(info[2], bstr):
tb_text = ''.join(
format_exception(*info, **{'as_html': 0}))
tb_html = ''.join(
format_exception(*info, **{'as_html': 1}))
else:
tb_text = info[2]
request = getattr(self, 'REQUEST', None)
url = None
username = None
userid = None
req_html = None
try:
strv = str(info[1])
except Exception:
strv = '<unprintable %s object>' % type(info[1]).__name__
if request:
url = request.get('URL', '?')
usr = getSecurityManager().getUser()
username = usr.getUserName()
userid = usr.getId()
try:
req_html = str(request)
except Exception:
pass
if strtype == 'NotFound':
strv = url
next = request['TraversalRequestNameStack']
if next:
next = list(next)
next.reverse()
strv = '%s [ /%s ]' % (strv, '/'.join(next))
log = self._getLog()
entry_id = str(now) + str(random()) # Low chance of collision
log.append({
'type': strtype,
'value': strv,
'time': now,
'id': entry_id,
'tb_text': tb_text,
'tb_html': tb_html,
'username': username,
'userid': userid,
'url': url,
'req_html': req_html})
cleanup_lock.acquire()
try:
if len(log) >= self.keep_entries:
del log[:-self.keep_entries]
finally:
cleanup_lock.release()
except Exception:
LOG.error('Error while logging', exc_info=sys.exc_info())
else:
notify(ErrorRaisedEvent(log[-1]))
if self.copy_to_zlog:
self._do_copy_to_zlog(now, strtype, entry_id,
str(url), tb_text)
return '%s/showEntry?id=%s' % (self.absolute_url(), entry_id)
def _do_copy_to_zlog(self, now, strtype, entry_id, url, tb_text):
when = _rate_restrict_pool.get(strtype, 0)
if now > when:
next_when = max(when,
now - _rate_restrict_burst * _rate_restrict_period)
next_when += _rate_restrict_period
_rate_restrict_pool[strtype] = next_when
LOG.error('%s %s\n%s' % (entry_id, url, tb_text.rstrip()))
@security.protected(use_error_logging)
def getProperties(self):
return {
'keep_entries': self.keep_entries,
'copy_to_zlog': self.copy_to_zlog,
'ignored_exceptions': self._ignored_exceptions,
}
@security.protected(log_to_event_log)
def checkEventLogPermission(self):
if not getSecurityManager().checkPermission(log_to_event_log, self):
raise Unauthorized('You do not have the "%s" permission.' %
log_to_event_log)
return 1
@security.protected(use_error_logging)
def setProperties(self, keep_entries, copy_to_zlog=0,
ignored_exceptions=(), RESPONSE=None):
"""Sets the properties of this site error log.
"""
copy_to_zlog = not not copy_to_zlog
if copy_to_zlog and not self.copy_to_zlog:
# Before turning on event logging, check the permission.
self.checkEventLogPermission()
self.keep_entries = int(keep_entries)
self.copy_to_zlog = copy_to_zlog
# filter out empty lines
# ensure we don't save exception objects but exceptions instead
self._ignored_exceptions = tuple(
[_f for _f in map(str, ignored_exceptions) if _f])
if RESPONSE is not None:
RESPONSE.redirect(
'%s/manage_main?manage_tabs_message=Changed+properties.' %
self.absolute_url())
@security.protected(use_error_logging)
def getLogEntries(self):
"""Returns the entries in the log, most recent first.
Makes a copy to prevent changes.
"""
# List incomprehension ;-)
res = [entry.copy() for entry in self._getLog()]
res.reverse()
return res
@security.protected(use_error_logging)
def getLogEntryById(self, id):
"""Returns the specified log entry.
Makes a copy to prevent changes. Returns None if not found.
"""
for entry in self._getLog():
if entry['id'] == id:
return entry.copy()
return None
@security.protected(use_error_logging)
def getLogEntryAsText(self, id, RESPONSE=None):
"""Returns the specified log entry.
Makes a copy to prevent changes. Returns None if not found.
"""
entry = self.getLogEntryById(id)
if entry is None:
return 'Log entry not found or expired'
if RESPONSE is not None:
RESPONSE.setHeader('Content-Type', 'text/plain')
return entry['tb_text']
InitializeClass(SiteErrorLog)
def manage_addErrorLog(dispatcher, RESPONSE=None):
"""Add a site error log to a container."""
log = SiteErrorLog()
dispatcher._setObject(log.id, log)
if RESPONSE is not None:
RESPONSE.redirect(
dispatcher.DestinationURL()
+ '/manage_main?manage_tabs_message=Error+Log+Added.')
@adapter(IPubFailure)
def IPubFailureSubscriber(event):
""" Handles an IPubFailure event triggered by the WSGI Publisher.
This handler forwards the event to the SiteErrorLog object
    closest to the published object that the error occurred with.
    No error is logged if no published object was found.
"""
request = event.request
published = request.get('PUBLISHED')
if published is None: # likely a traversal problem
parents = request.get('PARENTS')
if parents:
# partially emulate successful traversal
published = request['PUBLISHED'] = parents.pop(0)
if published is None:
return
published = getattr(published, '__self__', published) # method --> object
# Filter out transient errors like ConflictErrors that can be
# retried, just log a short message instead.
if isinstance(event.exc_info[1], TransientError) and \
request.supports_retry():
LOG.info('%s at %s: %s. Request will be retried.' % (
event.exc_info[0].__name__,
request.get('PATH_INFO') or '<unknown>',
str(event.exc_info[1])))
return
try:
error_log = aq_acquire(published, '__error_log__', containment=1)
except AttributeError:
pass
else:
error_log.raising(event.exc_info)
|
# -*- coding: utf-8 -*-
# Copyright © 2012-2013 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
import os
from docutils import nodes
from docutils.parsers.rst import Directive, directives
import lxml
from nikola.plugin_categories import RestExtension
from nikola.utils import LocaleBorg
class Plugin(RestExtension):
name = "gallery_directive"
def set_site(self, site):
self.site = site
Gallery.site = site
directives.register_directive('gallery', Gallery)
return super(Plugin, self).set_site(site)
class Gallery(Directive):
""" Restructured text extension for inserting an image gallery
Usage:
.. gallery:: foo
"""
has_content = False
required_arguments = 1
optional_arguments = 0
def run(self):
gallery_name = self.arguments[0]
kw = {
'output_folder': self.site.config['OUTPUT_FOLDER'],
'gallery_path': self.site.config['GALLERY_PATH'],
'thumbnail_size': self.site.config['THUMBNAIL_SIZE'],
}
gallery_folder = os.path.join(kw['output_folder'], kw['gallery_path'], gallery_name)
gallery_index = os.path.join(gallery_folder, 'index.html')
self.state.document.settings.record_dependencies.add(gallery_index)
with open(gallery_index, 'r') as inf:
data = inf.read()
dom = lxml.html.fromstring(data)
text = [e.text for e in dom.xpath('//script') if e.text and 'jsonContent = ' in e.text][0]
photo_array = json.loads(text.split(' = ', 1)[1].split(';', 1)[0])
for img in photo_array:
img['url'] = '/' + '/'.join([kw['gallery_path'], gallery_name, img['url']])
img['url_thumb'] = '/' + '/'.join([kw['gallery_path'], gallery_name, img['url_thumb']])
photo_array_json = json.dumps(photo_array)
context = {}
context['description'] = ''
context['title'] = ''
context['lang'] = LocaleBorg().current_lang
context['crumbs'] = []
context['folders'] = []
context['photo_array'] = photo_array
context['photo_array_json'] = photo_array_json
context['permalink'] = '#'
context.update(self.site.GLOBAL_CONTEXT)
context.update(kw)
output = self.site.template_system.render_template(
'gallery.tmpl',
None,
context
)
# This magical comment makes everything work. Try removing it!
output = '\n<!-- foo -->\n%s\n\n\n' % output
return [nodes.raw('', output, format='html')]
|
from django.conf.urls import include, url
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
import views
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^people/', views.people, name='people'),
    url(r'^api/', views.api, name='api'),
url(r'^user_guide/', views.user_guide, name='user_guide'),
url(r'^disclaimer/', views.disclaimer, name='disclaimer'),
url(r'^confidence/', views.confidence, name='confidence'),
url(r'^grappelli/', include('grappelli.urls')), # grappelli URLS
url(r'^admin/', include(admin.site.urls)),
url(r'^basicviz/', include('basicviz.urls')),
url(r'^annotation/', include('annotation.urls')),
url(r'^massbank/', include('massbank.urls')),
url(r'^options/', include('options.urls')),
url(r'^registration/', include('registration.urls')),
url(r'^uploads/', include('uploads.urls')),
    url(r'^decomposition/', include('decomposition.urls')),
url(r'^ms1analysis/', include('ms1analysis.urls')),
url(r'^motifdb/', include('motifdb.urls')),
] |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
Copyright (C) 2016-2018 plugin.video.youtube
SPDX-License-Identifier: GPL-2.0-only
See LICENSES/GPL-2.0-only for more information.
"""
FILES = 'files'
SONGS = 'songs'
ARTISTS = 'artists'
ALBUMS = 'albums'
MOVIES = 'movies'
TV_SHOWS = 'tvshows'
EPISODES = 'episodes'
VIDEOS = 'videos'
MUSIC_VIDEOS = 'musicvideos'
|
import json
import logging
import tweepy
from main import send_tweet_to_discord
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s | %(name)s | %(levelname)s | %(message)s"
)
logger = logging.getLogger(__name__)
class TwitterUser:
def __init__(self, _id, id_str, name, screen_name, location, url, description, translator_type, protected, verified, followers_count, friends_count, listed_count, favourites_count, statuses_count, created_at, utc_offset, time_zone, geo_enabled,
lang, contributors_enabled, is_translator, profile_background_color, profile_background_image_url, profile_background_image_url_https, profile_background_tile, profile_link_color, profile_sidebar_border_color, profile_sidebar_fill_color,
profile_text_color, profile_use_background_image, profile_image_url, profile_image_url_https, default_profile, default_profile_image, following, follow_request_sent, notifications):
self.id = _id
self.id_str = id_str
self.name = name
self.screen_name = screen_name
self.location = location
self.url = url
self.description = description
self.translator_type = translator_type
self.protected = protected
self.verified = verified
self.followers_count = followers_count
self.friends_count = friends_count
self.listed_count = listed_count
self.favourites_count = favourites_count
self.statuses_count = statuses_count
self.created_at = created_at
self.utc_offset = utc_offset
self.time_zone = time_zone
self.geo_enabled = geo_enabled
self.lang = lang
self.contributors_enabled = contributors_enabled
self.is_translator = is_translator
self.profile_background_color = profile_background_color
self.profile_background_image_url = profile_background_image_url
self.profile_background_image_url_https = profile_background_image_url_https
self.profile_background_tile = profile_background_tile
self.profile_link_color = profile_link_color
self.profile_sidebar_border_color = profile_sidebar_border_color
self.profile_sidebar_fill_color = profile_sidebar_fill_color
self.profile_text_color = profile_text_color
self.profile_use_background_image = profile_use_background_image
self.profile_image_url = profile_image_url
self.profile_image_url_https = profile_image_url_https
self.default_profile = default_profile
self.default_profile_image = default_profile_image
self.following = following
self.follow_request_sent = follow_request_sent
self.notifications = notifications
class UserMentions:
def __init__(self, screen_name, name, _id, id_str):
self.screen_name = screen_name
self.name = name
self.id = _id
self.id_str = id_str
class TweetEntities:
def __init__(self, hashtags, urls, user_mentions, symbols):
self.hashtags = hashtags
self.urls = urls
self.user_mentions = user_mentions
self.symbols = symbols
class Tweet:
def __init__(self, created_at, _id, id_str, text, source, truncated, in_reply_to_status_id, in_reply_to_status_id_str, in_reply_to_user_id, in_reply_to_user_id_str, in_reply_to_screen_name, geo, coordinates, place, contributors, is_quote_status,
quote_count, reply_count, retweet_count, favorite_count, favorited, retweeted, filter_level, lang, timestamp_ms, user, entities):
self.created_at = created_at
self.id = _id
self.id_str = id_str
self.text = text
self.source = source
self.truncated = truncated
self.in_reply_to_status_id = in_reply_to_status_id
self.in_reply_to_status_id_str = in_reply_to_status_id_str
self.in_reply_to_user_id = in_reply_to_user_id
self.in_reply_to_user_id_str = in_reply_to_user_id_str
self.in_reply_to_screen_name = in_reply_to_screen_name
self.user = user
self.geo = geo
self.coordinates = coordinates
self.place = place
self.contributors = contributors
self.is_quote_status = is_quote_status
self.quote_count = quote_count
self.reply_count = reply_count
self.retweet_count = retweet_count
self.favorite_count = favorite_count
self.entities = entities
self.favorited = favorited
self.retweeted = retweeted
self.filter_level = filter_level
self.lang = lang
self.timestamp_ms = timestamp_ms
def build_tweet(rd):
tweet = Tweet(
created_at=rd.get("created_at", None),
_id=rd.get("id", None),
id_str=rd.get("id_str", None),
text=rd.get("text", None),
source=rd.get("source", None),
truncated=rd.get("truncated", None),
in_reply_to_status_id=rd.get("in_reply_to_status_id", None),
in_reply_to_status_id_str=rd.get("in_reply_to_status_id_str", None),
in_reply_to_user_id=rd.get("in_reply_to_user_id", None),
in_reply_to_user_id_str=rd.get("in_reply_to_user_id_str", None),
in_reply_to_screen_name=rd.get("in_reply_to_screen_name", None),
user=TwitterUser(
_id=rd["user"].get("id", None),
id_str=rd["user"].get("id_str", None),
name=rd["user"].get("name", None),
screen_name=rd["user"].get("screen_name", None),
location=rd["user"].get("location", None),
url=rd["user"].get("url", None),
description=rd["user"].get("description", None),
translator_type=rd["user"].get("translator_type", None),
protected=rd["user"].get("protected", None),
verified=rd["user"].get("verified", None),
followers_count=rd["user"].get("followers_count", None),
friends_count=rd["user"].get("friends_count", None),
listed_count=rd["user"].get("listed_count", None),
favourites_count=rd["user"].get("favourites_count", None),
statuses_count=rd["user"].get("statuses_count", None),
created_at=rd["user"].get("created_at", None),
utc_offset=rd["user"].get("utc_offset", None),
time_zone=rd["user"].get("time_zone", None),
geo_enabled=rd["user"].get("geo_enabled", None),
lang=rd["user"].get("lang", None),
contributors_enabled=rd["user"].get("contributors_enabled", None),
is_translator=rd["user"].get("is_translator", None),
profile_background_color=rd["user"].get("profile_background_color", None),
profile_background_image_url=rd["user"].get("profile_background_image_url", None),
profile_background_image_url_https=rd["user"].get("profile_background_image_url_https", None),
profile_background_tile=rd["user"].get("profile_background_tile", None),
profile_link_color=rd["user"].get("profile_link_color", None),
profile_sidebar_border_color=rd["user"].get("profile_sidebar_border_color", None),
profile_sidebar_fill_color=rd["user"].get("profile_sidebar_fill_color", None),
profile_text_color=rd["user"].get("profile_text_color", None),
profile_use_background_image=rd["user"].get("profile_use_background_image", None),
profile_image_url=rd["user"].get("profile_image_url", None),
profile_image_url_https=rd["user"].get("profile_image_url_https", None),
default_profile=rd["user"].get("default_profile", None),
default_profile_image=rd["user"].get("default_profile_image", None),
following=rd["user"].get("following", None),
follow_request_sent=rd["user"].get("follow_request_sent", None),
notifications=rd["user"].get("notifications", None)
),
geo=rd.get("geo", None),
coordinates=rd.get("coordinates", None),
place=rd.get("place", None),
contributors=rd.get("contributors", None),
is_quote_status=rd.get("is_quote_status", None),
quote_count=rd.get("quote_count", None),
reply_count=rd.get("reply_count", None),
retweet_count=rd.get("retweet_count", None),
favorite_count=rd.get("favorite_count", None),
        entities=TweetEntities(
            hashtags=rd.get("entities", {}).get("hashtags", None),
            urls=rd.get("entities", {}).get("urls", None),
            user_mentions=rd.get("entities", {}).get("user_mentions", None),
            symbols=rd.get("entities", {}).get("symbols", None)
        ),
favorited=rd.get("favorited", None),
retweeted=rd.get("retweeted", None),
filter_level=rd.get("filter_level", None),
lang=rd.get("lang", None),
timestamp_ms=rd.get("timestamp_ms", None)
)
return tweet
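# Illustrative sketch: `build_tweet` only reads the keys it looks up and
# everything else falls back to None, so a minimal hand-written payload is
# enough. The values below are placeholders, not real API data.
def example_build_tweet():
    """Build a Tweet object from a minimal raw payload."""
    raw = {
        "id": 1,
        "id_str": "1",
        "text": "hello world",
        "user": {"id": 2, "id_str": "2", "screen_name": "someone"},
        "entities": {"hashtags": [], "urls": [], "user_mentions": [], "symbols": []},
    }
    return build_tweet(raw)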
class TwitterStreamListener(tweepy.StreamListener):
def on_status(self, status):
logger.info("*~~> on_status", status)
def on_connect(self):
logger.info("*~~> on_connect")
def on_data(self, raw_data):
raw_data = json.loads(raw_data)
try:
raw_data["user"].get("id")
except KeyError:
return
send_tweet_to_discord(
build_tweet(
raw_data
)
)
def on_delete(self, status_id, user_id):
logger.info("*~~> on_delete", status_id, user_id)
def on_direct_message(self, status):
logger.info("*~~> on_direct_message", status)
def on_disconnect(self, notice):
logger.info("*~~> on_disconnect", notice)
def on_error(self, status_code):
if status_code == 401:
logger.info("*~~> Status Code: 401. Unable to authenticate!")
import sys
sys.exit()
else:
logger.info("*~~> on_error", status_code)
def on_event(self, status):
logger.info("*~~> on_event", status)
def on_exception(self, exception):
logger.info("*~~> on_exception", exception)
def on_friends(self, friends):
logger.info("*~~> on_friends")
def on_limit(self, track):
logger.info("*~~> on_limit", track)
def on_scrub_geo(self, notice):
logger.info("*~~> on_scrub_geo", notice)
def on_status_withheld(self, notice):
logger.info("*~~> on_status_withheld", notice)
def on_timeout(self):
logger.info("*~~> on_timeout")
def on_user_withheld(self, notice):
logger.info("*~~> on_user_withheld", notice)
def on_warning(self, notice):
logger.info("*~~> on_warning")
|
from typing import Union
import pandas as pd
import gframe
def cat_to_num(self, col : str) -> None:
"""Changes categories to binary columns
Args:
col (str): Column in DataFrame
drop (bool, optional): Should it drop original column. Defaults to False.
"""
categories = self.df[col].dropna().unique()
features = []
for cat in categories:
binary = (self.df[col] == cat)
self.df[cat] = binary.astype("int")
def fillna(self, median : bool = False, mean : bool = False, func = None,
categories : Union[str, list] = [], drop : bool = False ) -> pd.DataFrame:
"""fills null values in data frame with functions either custom or builtin.
Args:
-----
median (bool, optional):
fill null values with median. Defaults to False.
mean (bool, optional):
fill null values with mean. Defaults to False.
func (function, optional):
fill null values in DataFrame with custom function.
func takes parameters func(self.df, col).
categories (Union[str, list], optional):
changes columns to boolean columns. Defaults to [].
drop (bool, optional):
drops original column after making them boolean. Defaults to False.
Raises:
        KeyError: If a passed-in column is not in the DataFrame
"""
for col in self.df:
if self.df[col].dtype in [float, int]:
if mean:
self.df[col].fillna(self.df[col].mean(), inplace = True)
elif median:
self.df[col].fillna(self.df[col].median(), inplace = True)
            elif func is not None:
func(self.df, col)
else:
self.df[col].fillna(-99999, inplace = True)
        else:
            try:
                cats = categories if isinstance(categories, list) else [categories]
                for cat in cats:
                    self.cat_to_num(cat)
            except KeyError:
                raise KeyError(' "{}" is not a column in the DataFrame'.format(cat))
if drop: self.df.drop(categories, axis=1, inplace=True)
return gframe.gframe(self.df)
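# Illustrative sketch: this assumes `gframe.gframe(df)` wraps a pandas
# DataFrame and exposes it as `.df`, which is inferred from the return
# statement above; the column names are made up for the example.
def _example_fillna():
    """Fill numeric nulls with the median and one-hot encode 'colour'."""
    df = pd.DataFrame({"age": [10, None, 30], "colour": ["red", "blue", None]})
    gf = gframe.gframe(df)
    return fillna(gf, median=True, categories="colour", drop=True)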
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-04 18:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('petitions', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Notifications',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('update', models.BooleanField(default=True)),
('response', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.TextField()),
('display_name', models.CharField(blank=True, max_length=3)),
('affiliation', models.PositiveSmallIntegerField(default=1)),
('notifications', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='profile.Notifications')),
('petitions_created', models.ManyToManyField(blank=True, related_name='profile_petitions_created', to='petitions.Petition')),
('petitions_signed', models.ManyToManyField(blank=True, related_name='profile_petitions_signed', to='petitions.Petition')),
('subscriptions', models.ManyToManyField(blank=True, related_name='profile_subscriptions', to='petitions.Petition')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Attempt to check each interface in nipype
"""
# Stdlib imports
import os
import re
import sys
import warnings
from nipype.interfaces.base import BaseInterface
import black
# Functions and classes
class InterfaceChecker(object):
"""Class for checking all interface specifications
"""
def __init__(
self,
package_name,
package_skip_patterns=None,
module_skip_patterns=None,
class_skip_patterns=None,
):
r""" Initialize package for parsing
Parameters
----------
package_name : string
Name of the top-level package. *package_name* must be the
name of an importable package
package_skip_patterns : None or sequence of {strings, regexps}
Sequence of strings giving URIs of packages to be excluded
Operates on the package path, starting at (including) the
first dot in the package path, after *package_name* - so,
if *package_name* is ``sphinx``, then ``sphinx.util`` will
result in ``.util`` being passed for earching by these
regexps. If is None, gives default. Default is:
['\.tests$']
module_skip_patterns : None or sequence
Sequence of strings giving URIs of modules to be excluded
Operates on the module name including preceding URI path,
back to the first dot after *package_name*. For example
``sphinx.util.console`` results in the string to search of
``.util.console``
            If None, uses the default. Default is:
['\.setup$', '\._']
class_skip_patterns : None or sequence
Sequence of strings giving classes to be excluded
Default is: None
"""
if package_skip_patterns is None:
package_skip_patterns = ["\\.tests$"]
if module_skip_patterns is None:
module_skip_patterns = ["\\.setup$", "\\._"]
if class_skip_patterns:
self.class_skip_patterns = class_skip_patterns
else:
self.class_skip_patterns = []
self.package_name = package_name
self.package_skip_patterns = package_skip_patterns
self.module_skip_patterns = module_skip_patterns
def get_package_name(self):
return self._package_name
def set_package_name(self, package_name):
"""Set package_name"""
# It's also possible to imagine caching the module parsing here
self._package_name = package_name
self.root_module = __import__(package_name)
self.root_path = self.root_module.__path__[0]
package_name = property(
get_package_name, set_package_name, None, "get/set package_name"
)
def _get_object_name(self, line):
name = line.split()[1].split("(")[0].strip()
# in case we have classes which are not derived from object
# ie. old style classes
return name.rstrip(":")
def _uri2path(self, uri):
"""Convert uri to absolute filepath
Parameters
----------
uri : string
URI of python module to return path for
Returns
-------
path : None or string
Returns None if there is no valid path for this URI
Otherwise returns absolute file system path for URI
"""
if uri == self.package_name:
return os.path.join(self.root_path, "__init__.py")
path = uri.replace(".", os.path.sep)
path = path.replace(self.package_name + os.path.sep, "")
path = os.path.join(self.root_path, path)
# XXX maybe check for extensions as well?
if os.path.exists(path + ".py"): # file
path += ".py"
elif os.path.exists(os.path.join(path, "__init__.py")):
path = os.path.join(path, "__init__.py")
else:
return None
return path
def _path2uri(self, dirpath):
""" Convert directory path to uri """
relpath = dirpath.replace(self.root_path, self.package_name)
if relpath.startswith(os.path.sep):
relpath = relpath[1:]
return relpath.replace(os.path.sep, ".")
def _parse_module(self, uri):
""" Parse module defined in *uri* """
filename = self._uri2path(uri)
if filename is None:
# nothing that we could handle here.
return ([], [])
f = open(filename, "rt")
functions, classes = self._parse_lines(f, uri)
f.close()
return functions, classes
def _parse_lines(self, linesource, module):
""" Parse lines of text for functions and classes """
functions = []
classes = []
for line in linesource:
if line.startswith("def ") and line.count("("):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith("_"):
functions.append(name)
elif line.startswith("class "):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith("_") and self._survives_exclude(
".".join((module, name)), "class"
):
classes.append(name)
else:
pass
functions.sort()
classes.sort()
return functions, classes
@classmethod
def _normalize_repr(cls, value):
if isinstance(value, list):
return "[{}]".format(", ".join(map(cls._normalize_repr, value)))
if isinstance(value, tuple):
if len(value) == 1:
return "({},)".format(cls._normalize_repr(value[0]))
return "({})".format(", ".join(map(cls._normalize_repr, value)))
if isinstance(value, (str, bytes)):
value = repr(value)
if value[0] not in ('"', "'"):
value = value[1:]
else:
value = repr(value)
return value
def test_specs(self, uri):
"""Check input and output specs in an uri
Parameters
----------
uri : string
python location of module - e.g 'sphinx.builder'
Returns
-------
"""
# get the names of all classes and functions
_, classes = self._parse_module(uri)
if not classes:
# print 'WARNING: Empty -',uri # dbg
return None
# Make a shorter version of the uri that omits the package name for
# titles
allowed_keys = [
"desc",
"genfile",
"xor",
"requires",
"desc",
"nohash",
"argstr",
"position",
"mandatory",
"copyfile",
"usedefault",
"sep",
"hash_files",
"deprecated",
"new_name",
"min_ver",
"max_ver",
"name_source",
"name_template",
"keep_extension",
"units",
"output_name",
"extensions",
]
in_built = [
"type",
"copy",
"parent",
"instance_handler",
"comparison_mode",
"array",
"default",
"editor",
]
bad_specs = []
for c in classes:
__import__(uri)
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
classinst = sys.modules[uri].__dict__[c]
except Exception:
continue
if not issubclass(classinst, BaseInterface):
continue
testdir = os.path.join(*(uri.split(".")[:-1] + ["tests"]))
if not os.path.exists(testdir):
os.makedirs(testdir)
nonautotest = os.path.join(testdir, "test_%s.py" % c)
testfile = os.path.join(testdir, "test_auto_%s.py" % c)
if os.path.exists(testfile):
os.unlink(testfile)
if not os.path.exists(nonautotest):
cmd = [
"# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT",
"from ..%s import %s" % (uri.split(".")[-1], c),
"",
]
cmd.append("\ndef test_%s_inputs():" % c)
input_fields = ""
for traitname, trait in sorted(
classinst.input_spec().traits(transient=None).items()
):
input_fields += "%s=dict(" % traitname
for key, value in sorted(trait.__dict__.items()):
if key in in_built or key == "desc":
continue
input_fields += "%s=%s,\n " % (
key,
self._normalize_repr(value),
)
input_fields += "),\n "
cmd += [" input_map = dict(%s)" % input_fields]
cmd += [" inputs = %s.input_spec()" % c]
cmd += [
"""
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value"""
]
fmt_cmd = black.format_str("\n".join(cmd), mode=black.FileMode())
with open(testfile, "wt") as fp:
fp.writelines(fmt_cmd)
else:
print("%s has nonautotest" % c)
for traitname, trait in sorted(
classinst.input_spec().traits(transient=None).items()
):
for key in sorted(trait.__dict__):
if key in in_built:
continue
parent_metadata = []
if "parent" in trait.__dict__:
parent_metadata = list(getattr(trait, "parent").__dict__.keys())
if (
key
not in allowed_keys
+ classinst._additional_metadata
+ parent_metadata
):
bad_specs.append([uri, c, "Inputs", traitname, key])
if (
key == "mandatory"
and trait.mandatory is not None
and not trait.mandatory
):
bad_specs.append(
[uri, c, "Inputs", traitname, "mandatory=False"]
)
if key == "usedefault" and trait.__dict__[key] == False:
bad_specs.append(
[uri, c, "Inputs", traitname, "usedefault=False"]
)
                # checking if traits that have a default_value different from the
                # trait type's default also have `usedefault` specified;
                # excluding TraitCompound
                # excluding Enum: always has a default value (the first value)
                # excluding Tuple: takes a tuple of the inner traits' default values as default, but doesn't use it
                # for Range, assuming that if default == low, it's likely that usedefault should be False
                # (Range traits take `low` as their default default value)
if (
trait.trait_type.__class__.__name__
not in ["TraitCompound", "Tuple", "Enum"]
and trait.default
and "usedefault" not in trait.__dict__
and "requires" not in trait.__dict__
and "xor" not in trait.__dict__
):
if (
trait.trait_type.__class__.__name__ == "Range"
and trait.default == trait.trait_type._low
):
continue
bad_specs.append(
[
uri,
c,
"Inputs",
traitname,
"default value is set, no value for usedefault",
]
)
if not classinst.output_spec:
continue
if not os.path.exists(nonautotest):
cmd = ["\ndef test_%s_outputs():" % c]
input_fields = ""
for traitname, trait in sorted(
classinst.output_spec().traits(transient=None).items()
):
input_fields += "%s=dict(" % traitname
for key, value in sorted(trait.__dict__.items()):
if key in in_built or key == "desc":
continue
input_fields += "%s=%s,\n " % (
key,
self._normalize_repr(value),
)
input_fields += "),\n "
cmd += [" output_map = dict(%s)" % input_fields]
cmd += [" outputs = %s.output_spec()" % c]
cmd += [
"""
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value"""
]
fmt_cmd = black.format_str("\n".join(cmd), mode=black.FileMode())
with open(testfile, "at") as fp:
fp.writelines("\n\n" + fmt_cmd)
for traitname, trait in sorted(
classinst.output_spec().traits(transient=None).items()
):
for key in sorted(trait.__dict__):
if key in in_built:
continue
parent_metadata = []
if "parent" in trait.__dict__:
parent_metadata = list(getattr(trait, "parent").__dict__.keys())
if (
key
not in allowed_keys
+ classinst._additional_metadata
+ parent_metadata
):
bad_specs.append([uri, c, "Outputs", traitname, key])
return bad_specs
def _survives_exclude(self, matchstr, match_type):
""" Returns True if *matchstr* does not match patterns
``self.package_name`` removed from front of string if present
Examples
--------
>>> dw = ApiDocWriter('sphinx')
>>> dw._survives_exclude('sphinx.okpkg', 'package')
True
>>> dw.package_skip_patterns.append('^\\.badpkg$')
>>> dw._survives_exclude('sphinx.badpkg', 'package')
False
>>> dw._survives_exclude('sphinx.badpkg', 'module')
True
>>> dw._survives_exclude('sphinx.badmod', 'module')
True
>>> dw.module_skip_patterns.append('^\\.badmod$')
>>> dw._survives_exclude('sphinx.badmod', 'module')
False
"""
if match_type == "module":
patterns = self.module_skip_patterns
elif match_type == "package":
patterns = self.package_skip_patterns
elif match_type == "class":
patterns = self.class_skip_patterns
else:
raise ValueError('Cannot interpret match type "%s"' % match_type)
# Match to URI without package name
L = len(self.package_name)
if matchstr[:L] == self.package_name:
matchstr = matchstr[L:]
for pat in patterns:
try:
pat.search
except AttributeError:
pat = re.compile(pat)
if pat.search(matchstr):
return False
return True
def discover_modules(self):
""" Return module sequence discovered from ``self.package_name``
Parameters
----------
None
Returns
-------
mods : sequence
Sequence of module names within ``self.package_name``
Examples
--------
"""
modules = [self.package_name]
# raw directory parsing
for dirpath, dirnames, filenames in os.walk(self.root_path):
# Check directory names for packages
root_uri = self._path2uri(os.path.join(self.root_path, dirpath))
for dirname in dirnames[:]: # copy list - we modify inplace
package_uri = ".".join((root_uri, dirname))
if self._uri2path(package_uri) and self._survives_exclude(
package_uri, "package"
):
modules.append(package_uri)
else:
dirnames.remove(dirname)
# Check filenames for modules
for filename in filenames:
module_name = filename[:-3]
module_uri = ".".join((root_uri, module_name))
if self._uri2path(module_uri) and self._survives_exclude(
module_uri, "module"
):
modules.append(module_uri)
return sorted(modules)
def check_modules(self):
# write the list
modules = self.discover_modules()
checked_modules = []
for m in modules:
bad_specs = self.test_specs(m)
if bad_specs:
checked_modules.extend(bad_specs)
for bad_spec in checked_modules:
print(":".join(bad_spec))
if __name__ == "__main__":
package = "nipype"
ic = InterfaceChecker(package)
# Packages that should not be included in generated API docs.
ic.package_skip_patterns += [
"\.external$",
"\.fixes$",
"\.utils$",
"\.pipeline",
"\.testing",
"\.caching",
"\.workflows",
]
"""
# Modules that should not be included in generated API docs.
ic.module_skip_patterns += ['\.version$',
'\.interfaces\.base$',
'\.interfaces\.matlab$',
'\.interfaces\.rest$',
'\.interfaces\.pymvpa$',
'\.interfaces\.slicer\.generate_classes$',
'\.interfaces\.spm\.base$',
'\.interfaces\.traits',
'\.pipeline\.alloy$',
'\.pipeline\.s3_node_wrapper$',
'.\testing',
]
ic.class_skip_patterns += ['AFNI',
'ANTS',
'FSL',
'FS',
'Info',
'^SPM',
'Tester',
'Spec$',
'Numpy',
'NipypeTester',
]
"""
ic.check_modules()
|
import torch
import sentencepiece as spm
from bert.model import MovieClassification, BERTPretrain
from bert.data import MovieDataSet, movie_collate_fn, PretrainDataSet, make_pretrain_data, pretrin_collate_fn
from config import Config
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from IPython.display import display
import pandas as pd
from modules import enc_input_to_sentence, test_one
import os
################################################################################################################################
# vocab loading
data_dir = "data"
vocab_file = f"{data_dir}/kowiki.model"
vocab = spm.SentencePieceProcessor()
vocab.load(vocab_file)
################################################################################################################################
""" 데이터 로더 """
batch_size = 128
train_dataset = MovieDataSet(vocab, f"{data_dir}/ratings_train.json")
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=movie_collate_fn)
test_dataset = MovieDataSet(vocab, f"{data_dir}/ratings_test.json")
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, collate_fn=movie_collate_fn)
# for labels, enc_inputs, dec_inputs in train_loader:
# print(labels)
# print(enc_inputs)
# print(dec_inputs)
# print(labels.shape, enc_inputs.shape, dec_inputs.shape)
# for i in range(10):
# print(vocab.IdToPiece(i), end=" ")
# print()
# for idx in range(len(enc_inputs)):
# sentence = enc_input_to_sentence(enc_inputs, idx)
# input(sentence)
# break
################################################################################################################################
""" 모델 epoch 학습 """
def train_epoch(config, epoch, model, criterion_cls, optimizer, train_loader):
losses = []
model.train()
with tqdm(total=len(train_loader), desc=f"Train({epoch})") as pbar:
for i, value in enumerate(train_loader):
labels, inputs, segments = map(lambda v: v.to(config.device), value)
optimizer.zero_grad()
outputs = model(inputs, segments)
logits_cls = outputs[0]
loss_cls = criterion_cls(logits_cls, labels)
loss = loss_cls
loss_val = loss_cls.item()
losses.append(loss_val)
loss.backward()
optimizer.step()
pbar.update(1)
pbar.set_postfix_str(f"Loss: {loss_val:.3f} ({np.mean(losses):.3f})")
return np.mean(losses)
""" 모델 epoch 평가 """
def eval_epoch(config, model, data_loader):
matchs = []
model.eval()
n_word_total = 0
n_correct_total = 0
with tqdm(total=len(data_loader), desc=f"Valid") as pbar:
for i, value in enumerate(data_loader):
labels, inputs, segments = map(lambda v: v.to(config.device), value)
outputs = model(inputs, segments)
logits_cls = outputs[0]
_, indices = logits_cls.max(1)
match = torch.eq(indices, labels).detach()
matchs.extend(match.cpu())
accuracy = np.sum(matchs) / len(matchs) if 0 < len(matchs) else 0
pbar.update(1)
pbar.set_postfix_str(f"Acc: {accuracy:.3f}")
return np.sum(matchs) / len(matchs) if 0 < len(matchs) else 0
################################################################################################################################
config = Config({
"n_enc_vocab": len(vocab),
"n_enc_seq": 256,
"n_seg_type": 2,
"n_layer": 2, # 6 (default value)
"d_hidn": 256,
"i_pad": 0,
"d_ff": 512, # 1024 (default value)
"n_head": 4,
"d_head": 64,
"dropout": 0.1,
"layer_norm_epsilon": 1e-12
})
print(config)
config.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
config.n_output = 2
learning_rate = 5e-5
n_epoch = 1 # 10 (default value)
################################################################################################################################
model = BERTPretrain(config)
save_pretrain = f"{data_dir}/save_bert_pretrain.pth"
best_epoch, best_loss = 0, 0
if os.path.isfile(save_pretrain):
best_epoch, best_loss = model.bert.load(save_pretrain, map_location=config.device)
print(f"load pretrain from: {save_pretrain}, epoch={best_epoch}, loss={best_loss}")
best_epoch += 1
model.to(config.device)
criterion_lm = torch.nn.CrossEntropyLoss(ignore_index=-1, reduction='mean')
criterion_cls = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
losses = []
offset = best_epoch
for step in range(n_epoch):
epoch = step + offset
if 0 < step:
del train_loader
dataset = PretrainDataSet(vocab, f"{data_dir}/kowiki_bert_{epoch % count}.json")
train_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=pretrin_collate_fn)
loss = train_epoch(config, epoch, model, criterion_lm, criterion_cls, optimizer, train_loader)
losses.append(loss)
model.bert.save(epoch, loss, save_pretrain)
################################################################################################################################
def train(model):
model.to(config.device)
criterion_cls = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
best_epoch, best_loss, best_score = 0, 0, 0
losses, scores = [], []
for epoch in range(n_epoch):
loss = train_epoch(config, epoch, model, criterion_cls, optimizer, train_loader)
score = eval_epoch(config, model, test_loader)
losses.append(loss)
scores.append(score)
if best_score < score:
best_epoch, best_loss, best_score = epoch, loss, score
print(f">>>> epoch={best_epoch}, loss={best_loss:.5f}, socre={best_score:.5f}")
return losses, scores
# model = MovieClassification(config)
# losses_00, scores_00 = train(model)
model = MovieClassification(config)
model.bert.load(save_pretrain, map_location=config.device)
losses_20, scores_20 = train(model)
################################################################################################################################
try:
model.save("bert_tutorial.pth")
except Exception:
torch.save(model.state_dict(), "bert_tutorial.pth")
################################################################################################################################
test_one(MovieClassification, config, test_loader, "bert_tutorial.pth")
# # table
# data = {
# "loss_00": losses_00,
# "socre_00": scores_00,
# "loss_20": losses_20,
# "socre_20": scores_20,
# }
# df = pd.DataFrame(data)
# display(df)
# # graph
# plt.figure(figsize=[12, 4])
# plt.plot(scores_00, label="score_00")
# plt.plot(scores_20, label="score_20")
# plt.legend()
# plt.xlabel('Epoch')
# plt.ylabel('Value')
# plt.show()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: game.py
# -------------------
# Divine Oasis
# Text Based RPG Game
# By wsngamerz
# -------------------
import divineoasis
import logging
import logging.config
import os
import platform
import pyglet
import sys
from divineoasis.assets import Assets, Directories
from divineoasis.config import Config
from divineoasis.colours import Colours
from divineoasis.scene_manager import SceneManager
from pyglet.window import Window
class DivineOasis:
def __init__(self, debug: bool = False):
self.debug = debug
if self.debug:
if platform.system() == "Windows":
# Set larger console
os.system("mode con: cols=200 lines=9999")
if platform.system() != "Linux":
import pyglet_ffmpeg
pyglet_ffmpeg.load_ffmpeg()
# Enable Colours using black magic
os.system("")
# Setup Logging
self.game_logger = self.setup_logging(debug)
# Get basic system information
self.system_data = {}
self.system_info()
# Basic classes
self.game_config = Config()
self.game_config.load()
self.game_assets = Assets(self.game_config.get("language.lang"))
# setup Pyglet
pyglet.options['audio'] = ('openal', 'pulse', 'directsound', 'silent')
vsync_enabled = self.game_config.get("graphics.vsync")
self.window = Window(1280, 720)
self.window.set_vsync(vsync_enabled)
# TODO: Fix fullscreen mode
# self.window.set_fullscreen(self.game_config.get("fullscreen"))
self.window.set_caption(self.game_assets.get("lang.title.main_title"))
fps_limit = self.game_config.get("graphics.fps")
self.scene_manager = SceneManager(self.game_assets, self.window)
if vsync_enabled:
pyglet.clock.schedule(self.scene_manager.update)
else:
pyglet.clock.schedule_interval(self.scene_manager.update, 1.0 / fps_limit)
def start(self):
self.game_logger.info(f"Starting Divine Oasis { divineoasis.__version__ }")
# Start Pyglet loop
pyglet.app.run()
@staticmethod
def setup_logging(debug: bool):
if debug:
level = "DEBUG"
else:
level = "INFO"
logging.config.dictConfig({
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s"
}
},
"handlers": {
"default": {
"class": "logging.StreamHandler",
"formatter": "standard"
}
},
"loggers": {
"": {
"handlers": ["default"],
"propagate": True,
"level": level
}
}
})
logging.addLevelName(logging.DEBUG, Colours.BOLD + Colours.BRIGHT_CYAN + "DEBUG" + Colours.RESET)
logging.addLevelName(logging.INFO, Colours.BOLD + Colours.BRIGHT_BLUE + "INFO" + Colours.RESET)
logging.addLevelName(logging.WARNING, Colours.BOLD + Colours.BRIGHT_YELLOW + "WARNING" + Colours.RESET)
logging.addLevelName(logging.ERROR, Colours.BOLD + Colours.BRIGHT_RED + "ERROR" + Colours.RESET)
logging.addLevelName(logging.CRITICAL, Colours.BOLD + Colours.BRIGHT_RED + Colours.BLINK + "CRITICAL" + Colours.RESET)
return logging.getLogger(__name__)
def system_info(self):
self.system_data = {
"arguments": sys.argv,
"python_version": sys.version,
"os": platform.system(),
"os_release": platform.release(),
"os_version": platform.version(),
"os_arch": platform.machine(),
"os_platform": platform.platform()
}
self.game_logger.debug("=*=*=*=*=*=*=*=*=*=*=*= Debug Information =*=*=*=*=*=*=*=*=*=*=*=")
self.game_logger.debug(f" Arguments: { self.system_data['arguments'] }")
self.game_logger.debug(f" Python Version: { self.system_data['python_version'] }")
self.game_logger.debug(f" OS: { self.system_data['os'] }")
self.game_logger.debug(f" OS Version: { self.system_data['os_version'] }")
self.game_logger.debug(f" OS Release: { self.system_data['os_release'] }")
self.game_logger.debug(f" OS Architecture: { self.system_data['os_arch'] }")
self.game_logger.debug(f" OS Platform: { self.system_data['os_platform'] }")
self.game_logger.debug("=*=*=*=*=*=*=*=*=*=*=*=*=* Directories *=*=*=*=*=*=*=*=*=*=*=*=*=")
self.game_logger.debug(f" Application Root: { Directories().application_root }")
self.game_logger.debug(f" Assets Directory: { Directories().assets_directory }")
self.game_logger.debug(f" Data Directory: { Directories().data_directory }")
self.game_logger.debug(f" Config Location: { Directories().config_location }")
self.game_logger.debug("=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=")
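# Illustrative entry point (a sketch; the project's real launcher may differ):
# if __name__ == "__main__":
#     game = DivineOasis(debug="--debug" in sys.argv)
#     game.start()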
|
"""Grype SCA and Container tool class"""
import shlex
from pydash import py_
from eze.core.enums import VulnerabilityType, VulnerabilitySeverityEnum, ToolType, SourceType, Vulnerability
from eze.utils.cli import extract_cmd_version, run_async_cli_command
from eze.core.tool import ToolMeta, ScanResult
from eze.utils.io import create_tempfile_path, write_text, parse_json
from eze.utils.log import log_error
class GrypeTool(ToolMeta):
"""SCA and Container scanning tool Grype tool class"""
TOOL_NAME: str = "anchore-grype"
TOOL_TYPE: ToolType = ToolType.SCA
SOURCE_SUPPORT: list = [SourceType.RUBY, SourceType.NODE, SourceType.JAVA, SourceType.PYTHON, SourceType.CONTAINER]
SHORT_DESCRIPTION: str = "opensource multi language SCA and container scanner"
    INSTALL_HELP: str = """In most cases all that is required is to install grype via apt-get or docker
As of writing, no native Windows 10 grype binary exists, but it can be run via WSL2"""
MORE_INFO: str = """https://github.com/anchore/grype
Tips
===========================
- use slim versions of base images
- always create a application user for running entry_point and cmd commands
- read https://owasp.org/www-project-docker-top-10/
Common Gotchas
===========================
Worth mentioning that vulnerability counts are quite high for official out-of-the-box docker images
trivy image node:slim
Total: 101 (UNKNOWN: 2, LOW: 67, MEDIUM: 8, HIGH: 20, CRITICAL: 4)
trivy image python:3.8-slim
Total: 112 (UNKNOWN: 2, LOW: 74, MEDIUM: 11, HIGH: 21, CRITICAL: 4)
"""
# https://github.com/anchore/grype/blob/main/LICENSE
LICENSE: str = """Apache-2.0"""
EZE_CONFIG: dict = {
"SOURCE": {
"type": str,
"default": ".",
"help_text": """By default it is "." aka local folder
From grype help
Supports the following image sources:
grype yourrepo/yourimage:tag defaults to using images from a Docker daemon
grype path/to/yourproject a Docker tar, OCI tar, OCI directory, or generic filesystem directory
You can also explicitly specify the scheme to use:
grype docker:yourrepo/yourimage:tag explicitly use the Docker daemon
grype docker-archive:path/to/yourimage.tar use a tarball from disk for archives created from "docker save"
grype oci-archive:path/to/yourimage.tar use a tarball from disk for OCI archives (from Podman or otherwise)
grype oci-dir:path/to/yourimage read directly from a path on disk for OCI layout directories (from Skopeo or otherwise)
grype dir:path/to/yourproject read directly from a path on disk (any directory)
grype sbom:path/to/syft.json read Syft JSON from path on disk
grype registry:yourrepo/yourimage:tag pull image directly from a registry (no container runtime required)""",
"help_example": """python""",
},
"CONFIG_FILE": {
"type": str,
"help_text": """Grype config file location, by default Empty, maps to grype argument
-c, --config string application config file""",
},
"GRYPE_IGNORE_UNFIXED": {
"type": bool,
"default": False,
"help_text": """if true ignores state = "not-fixed""" "",
},
"REPORT_FILE": {
"type": str,
"default": create_tempfile_path("tmp-grype-report.json"),
"default_help_value": "<tempdir>/.eze-temp/tmp-grype-report.json",
"help_text": "output report location (will default to tmp file otherwise)",
},
}
TOOL_CLI_CONFIG = {
"CMD_CONFIG": {
# tool command prefix
"BASE_COMMAND": shlex.split("grype -o=json"),
# eze config fields -> arguments
"TAIL_ARGUMENTS": ["SOURCE"],
# eze config fields -> flags
"FLAGS": {"CONFIG_FILE": "-c="},
}
}
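    # Roughly, with the default config the assembled command is (illustrative):
    #   grype -o=json [-c=<CONFIG_FILE>] <SOURCE>
    # where the -c= flag is only appended when CONFIG_FILE is set in the eze config.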
@staticmethod
def check_installed() -> str:
"""Method for detecting if tool installed and ready to run scan, returns version installed"""
version = extract_cmd_version(["grype", "version"])
return version
async def run_scan(self) -> ScanResult:
"""
Method for running a synchronous scan using tool
:raises EzeError
"""
completed_process = await run_async_cli_command(self.TOOL_CLI_CONFIG["CMD_CONFIG"], self.config, self.TOOL_NAME)
report_text = completed_process.stdout
write_text(self.config["REPORT_FILE"], report_text)
report_events = parse_json(report_text)
report = self.parse_report(report_events)
if completed_process.stderr:
report.warnings.append(completed_process.stderr)
return report
def grype_severity_to_cwe_severity(self, grype_severity: str) -> str:
"""convert grype severities into standard cvss severity
as per
https://semgrep.dev/docs/writing-rules/rule-syntax/#schema
https://nvd.nist.gov/vuln-metrics/cvss"""
grype_severity = grype_severity.lower()
has_severity = hasattr(VulnerabilitySeverityEnum, grype_severity)
if not has_severity:
if grype_severity == "negligible":
return VulnerabilitySeverityEnum.na.name
            log_error(f"unknown grype severity '{grype_severity}', defaulting to na")
return VulnerabilitySeverityEnum.na.name
return VulnerabilitySeverityEnum[grype_severity].name
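    # Example mappings (assuming VulnerabilitySeverityEnum defines critical/high/medium/low/na):
    #   "Critical"   -> "critical"
    #   "Negligible" -> "na"
    #   "Unknown123" -> "na" (after logging an error)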
def parse_report(self, parsed_json: list) -> ScanResult:
"""convert report json into ScanResult"""
grype_matches = py_.get(parsed_json, "matches", [])
vulnerabilities_list = []
dup_key_list = {}
for grype_match in grype_matches:
is_unfixed = py_.get(grype_match, "vulnerability.fix.state", "") == "not-fixed"
if self.config["GRYPE_IGNORE_UNFIXED"] and is_unfixed:
continue
references = py_.get(grype_match, "vulnerability.urls", [])
source_url = py_.get(grype_match, "vulnerability.dataSource", None)
if source_url and source_url not in references:
references.insert(0, source_url)
grype_severity = py_.get(grype_match, "vulnerability.severity", [])
severity = self.grype_severity_to_cwe_severity(grype_severity)
language = py_.get(grype_match, "artifact.language", None)
if not language:
language = "container"
file_location = None
vulnerable_package = py_.get(grype_match, "artifact.name", None)
installed_version = py_.get(grype_match, "artifact.version", None)
fixed_version = py_.get(grype_match, "vulnerability.fix.versions[0]", None)
recommendation = ""
if fixed_version:
recommendation = f"Update {vulnerable_package} ({installed_version}) to a non vulnerable version, fix version: {fixed_version}"
identifiers = {}
identifier_id = py_.get(grype_match, "vulnerability.id", None)
            if identifier_id and identifier_id.startswith("CVE"):
                identifiers["cve"] = identifier_id
            elif identifier_id and identifier_id.startswith("GHSA"):
                identifiers["ghsa"] = identifier_id
overview = py_.get(grype_match, "vulnerability.description", [])
related_vulnerability = py_.get(grype_match, "relatedVulnerabilities[0].id", None)
if related_vulnerability and related_vulnerability == identifier_id and not recommendation:
overview = py_.get(grype_match, "relatedVulnerabilities[0].description", None)
unique_key = f"{vulnerable_package}_{severity}_{installed_version}"
if dup_key_list.get(unique_key):
continue
dup_key_list[unique_key] = True
vulnerability_raw = {
"vulnerability_type": VulnerabilityType.dependency.name,
"name": vulnerable_package,
"version": installed_version,
"overview": overview,
"recommendation": recommendation,
"language": language,
"severity": severity,
"identifiers": identifiers,
"file_location": file_location,
"references": references,
"metadata": None,
}
vulnerability = Vulnerability(vulnerability_raw)
vulnerabilities_list.append(vulnerability)
report = ScanResult(
{
"tool": self.TOOL_NAME,
"vulnerabilities": vulnerabilities_list,
}
)
return report
|
import numpy as np
import pickle
import os
import matplotlib.pyplot as plt
import networkx as nx
base_file_path = os.path.abspath(os.path.join(os.curdir, '..', '..', '..')) # should point to the level above the src directory
data_path = os.path.join(base_file_path, 'data', 'Intercity_Dallas')
county_data = pickle.load(open(os.path.join(data_path, 'data_processing_outputs', 'county_data.p'), 'rb'))
adjacency_matrix = np.load(os.path.join(data_path, 'data_processing_outputs', 'adjacency_matrix_5.npy'))
county_list = list(county_data.keys())
num_counties = len(county_list)
# Entity indexes (religion is not included in entity_list, so restaurant is index 5)
# 0 - groceries
# 1 - fitness
# 2 - pharmacy
# 3 - physician
# 4 - hotel
# 5 - restaurant
entity_list = ['grocery_demand_dest',
'fitness_demand_dest',
'pharmacy_demand_dest',
'physician_demand_dest',
'hotel_demand_dest',
'restaurant_demand_dest']
num_entities = len(entity_list)
edge_weights = np.zeros((num_counties, num_counties, num_entities), dtype=float)
for county_ind in range(num_counties):
for entity_ind in range(num_entities):
city_name = county_list[county_ind]
entity_name = entity_list[entity_ind]
edge_weights[county_ind, :, entity_ind] = np.array(county_data[city_name][entity_name])
# Prune edge weights corresponding to missing edges in pre-calculated adjacency matrix
for entity_ind in range(num_entities):
edge_weights[:, :, entity_ind] = np.multiply(adjacency_matrix, edge_weights[:, :, entity_ind]) # Use the adjacency matrix as a mask
for county_ind in range(num_counties):
for entity_ind in range(num_entities):
# weights are only considered to determine likelihood of moving to ANOTHER region.
# Should not take self-loops into account.
edge_weights[county_ind, county_ind, entity_ind] = 0.0
#normalize remaining edges to sum to 1
if not (np.sum(edge_weights[county_ind, :, entity_ind]) == 0.0):
edge_weights[county_ind, :, entity_ind] = edge_weights[county_ind, :, entity_ind] / np.sum(edge_weights[county_ind, :, entity_ind])
# If no edge weight data exists, set weights to be uniform over the outgoing edges in the adjacency matrix
else:
num_outgoing_edges = np.sum(adjacency_matrix[county_ind, :])
if adjacency_matrix[county_ind, county_ind] == 1:
num_outgoing_edges = num_outgoing_edges - 1
for adj_ind in range(num_counties):
mask_val = adjacency_matrix[county_ind, adj_ind]
if mask_val == 1.0 and not (adj_ind == county_ind):
edge_weights[county_ind, adj_ind, entity_ind] = 1 / num_outgoing_edges
else:
edge_weights[county_ind, adj_ind, entity_ind] = 0.0
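# After the loops above, each row edge_weights[county_ind, :, entity_ind] is intended to be a
# probability distribution over the OTHER counties: self-loops are zeroed, rows with data are
# normalized to sum to 1, and rows without data are spread uniformly over adjacent counties.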
for county_ind in range(num_counties):
for entity_ind in range(num_entities):
        if abs(np.sum(edge_weights[county_ind, :, entity_ind]) - 1.0) <= 1e-8:
pass
else:
print('city name: {}, entity name: {}, sum of edge weights: {}'.format(county_list[county_ind],
entity_list[entity_ind],
np.sum(edge_weights[county_ind, :, entity_ind])))
np.save(os.path.join(data_path, 'data_processing_outputs', 'edge_weights_5.npy'), edge_weights)
print(edge_weights)
# entity = 5
# # Visualize the resulting adjacency matrix
# G = nx.DiGraph()
# G_fully_connected = nx.DiGraph()
# for i in range(num_counties):
# G.add_node(county_list[i])
# G_fully_connected.add_node(county_list[i])
# for j in range(num_counties):
# G_fully_connected.add_edge(county_list[i], county_list[j])
# if edge_weights[i, j, entity] >= 1e-4:
# G.add_edge(county_list[i], county_list[j])
# pos = dict()
# for i in range(num_counties):
# county = county_list[i]
# # x_loc = county_data[city]['x_loc']
# # y_loc = county_data[city]['y_loc']
# # pos[city] = np.array([x_loc, y_loc])
# pos[county] = np.array([np.random.normal(), np.random.normal()])
# nx.draw_networkx_nodes(G, pos)
# nx.draw_networkx_labels(G, pos)
# # nx.draw_networkx_edges(G_fully_connected, pos, edge_color='red')
# nx.draw_networkx_edges(G, pos, edge_color='black')
# plt.show()
|
from typing import Tuple, Union
import numpy as np
from abito.lib.utils import _quantile_is_valid
def _argquantile_weighted(
weights_sorted: np.ndarray,
q: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
inds = []
cuts = []
q = np.asanyarray(q)
    weight_qs = [int(qq * weights_sorted.sum()) for qq in q.flatten()]
cur_weight_q = 0
weight_q = weight_qs.pop(0)
for cur_weight_ind, cur_weight in enumerate(weights_sorted):
cur_weight_q += cur_weight
while weight_q <= cur_weight_q:
inds.append(cur_weight_ind)
cuts.append(cur_weight - (cur_weight_q - weight_q))
if weight_qs:
weight_q = weight_qs.pop(0)
else:
weight_q = -1
break
if weight_q < 0:
break
inds = np.asanyarray(inds).reshape(q.shape)
cuts = np.asanyarray(cuts).reshape(q.shape)
return inds, cuts
def _argtrim_plain(
ar: np.ndarray,
q: np.ndarray,
**kwargs
):
trim_inds = (q * ar.shape[0]).astype('int')
ind = np.argpartition(ar, trim_inds, **kwargs)
return ind[trim_inds[0]:trim_inds[1]]
def _argtrim_weighted(
ar: np.ndarray,
weights: np.ndarray,
q: np.ndarray,
**kwargs
) -> Tuple[np.ndarray, Union[np.ndarray, None]]:
weights = weights.copy()
ind_sorted = ar.argsort(**kwargs)
weights = weights[ind_sorted]
(lowercut_a, uppercut_a), (lowercut_w, uppercut_w) = _argquantile_weighted(weights, q)
weights[uppercut_a] = uppercut_w
weights[lowercut_a] -= lowercut_w
ind_sorted = ind_sorted[lowercut_a:uppercut_a + 1]
weights = weights[lowercut_a:uppercut_a + 1]
return ind_sorted, weights
def argtrim(
ar: np.ndarray,
weights: np.ndarray = np.empty(0),
ltrim: float = 0,
rtrim: float = 0,
**kwargs
):
q = np.array([ltrim, 1 - rtrim])
_quantile_is_valid(q)
if weights.shape[0] == 0:
ind = _argtrim_plain(ar, q, **kwargs)
else:
ind, weights = _argtrim_weighted(ar, weights, q, **kwargs)
return ind, weights
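# Example usage (illustrative sketch, not part of the library API):
#   ar = np.array([5.0, 1.0, 9.0, 3.0, 7.0])
#   ind, w = argtrim(ar, ltrim=0.2, rtrim=0.2)
#   ar[ind] then holds the middle three values (one observation trimmed from each tail);
#   w is the empty weights array when no weights were passed.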
|
import numpy as np
from BDQuaternions import UnitQuaternion
from BDQuaternions import functions as qt
q1 = UnitQuaternion()
q2 = UnitQuaternion(np.array([0, 1, 0, 0], dtype=np.double))
print('BDQuaternions:', q1, q2)
print('Scalar part:', q1.scalar_part())
print('Vector part:', q1.vector_part())
print('q1* =', q1.conjugate())
print('q2* =', q2.conjugate())
print('q1 + q2 =', q1 + q2)
print('q1 - q2 =', q1 - q2)
print('q1 * q2 =', q1 * q2)
print('q1 == q2:', q1 == q2, '\tq1 == q1:', q1 == q1)
print('||q1|| =', q1.norm)
print('||q2|| =', q2.norm)
print('distance d(q1,q2) =', q1.distance(q2))
print('q1 versor =', q1.versor())
print('q2 versor =', q2.versor())
print('q1 reciprocal =', q1.reciprocal())
print('q2 reciprocal =', q2.reciprocal())
print('q1 / 3 =', q1 / 3)
print('3 / q2 =', 3 / q2)
print('q1 real matrix:\n', q1.real_matrix())
print('q1 complex matrix:\n', q1.complex_matrix())
print('q1 polar representation:', q1.polar)
print('q2 polar representation:', q2.polar)
print('q1^3 =', q1 ** 3)
print('q2^3 =', q2 ** 3)
print('exp(q1) =', qt.exp(q1))
print('exp(q2) =', qt.exp(q2))
print('log(q1) =', qt.log(q1))
print('log(q2) =', qt.log(q2))
print('exp(log(q1)) =', qt.exp(qt.log(q1)))
print('exp(log(q2)) =', qt.exp(qt.log(q2)))
|
# Scraper for Oklahoma Court of Criminal Appeals
#CourtID: oklacrimapp
#Court Short Name: OK
#Author: Andrei Chelaru
#Reviewer: mlr
#Date: 2014-07-05
from datetime import date
from juriscraper.opinions.united_states.state import okla
class Site(okla.Site):
def __init__(self):
super(Site, self).__init__()
self.court_id = self.__module__
d = date.today()
self.url = 'http://www.oscn.net/applications/oscn/Index.asp?ftdb=STOKCSCR&year={year}&level=1'.format(
year=d.year
)
|
import plotly.express as px
import matplotlib.pyplot as plt
import streamlit as st
import pandas as pd
def pareto_plot(df_abc):
fig = px.line(
data_frame=df_abc,
width=800,
height=600,
x='SKU_%',
y='QTY%_CS',
labels={ "SKU_%": 'Percentage of SKU (%)',
"QTY%_CS": 'Percentage of the Quantity (%)'}) #, title="ABC Analysis: Distribution by Quantity"
# 5%, 20% of SKU (A and B)
fig.add_vline(x=5, line_width=1, line_dash="dot", line_color="red")
fig.add_vline(x=20, line_width=1, line_dash="dot", line_color="red")
# 20%, 50% of SKU Number
fig.add_hline(y=80, line_width=1, line_dash="dot", line_color="blue")
fig.add_hline(y=95, line_width=1, line_dash="dot", line_color="blue")
# Quick Analysis
nsku_qty80 = round((df_abc[df_abc['QTY%_CS'] > 80]['SKU_%'].values[0]),2)
qty_nsku20 = round((df_abc[df_abc['SKU_%'] > 20]['QTY%_CS'].values[0]),2)
st.write(fig)
return nsku_qty80, qty_nsku20
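# Illustrative call (column names as produced by the ABC preparation step upstream):
#   nsku_qty80, qty_nsku20 = pareto_plot(df_abc)
#   nsku_qty80 -> % of SKUs needed to reach 80% of total quantity
#   qty_nsku20 -> % of total quantity covered once 20% of SKUs are included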
def abc_analysis(df, interval, list_family, family_col):
fig = px.scatter(
data_frame=df[df[family_col].isin(list_family)],
width=800,
height=600,
x='QTY%',
y='CV',
color ='ABC',
labels={ "QTY%": 'Percentage of Quantity (%)',
"CV": 'Coefficient of Variation (σ/μ)'}) # ,title="Distribution by Demand Variability"
colors = {'A':'red', 'B':'green', 'C':'blue'}
# ABC
n_sku = len(df)
n_a, n_b = int(0.05*n_sku), int(0.5*n_sku)
# A, B, C on turnover
to_a, to_b = df[df['SKU_ID']==n_a]['QTY%'].max(), df[df['SKU_ID']==n_b]['QTY%'].max()
# A, B and C
fig.add_vline(to_a , line_width=1, line_dash="dot", line_color="red")
fig.add_vline(to_b , line_width=1, line_dash="dot", line_color="red")
# CV = 1
fig.add_hline(1 , line_width=1, line_dash="dot", line_color="black")
# Set limit in CV
fig.update(layout_yaxis_range = [0,interval])
st.write(fig)
def abc_barplot(df_abc, family_col, metric_col):
# BAR PLOT OF SKU DISTRIBUTION BY FAMILY AND ABC CLASS
df_dist = pd.DataFrame(df_abc[[family_col,'ABC', 'QTY']].groupby(
[family_col,'ABC'])['QTY'].count()).reset_index()
# Simple histogram
fig = px.bar(data_frame=df_dist,
width=800,
height=600,
x=family_col,
y = 'QTY',
color = 'ABC',
labels={ family_col: 'Split by {}'.format(family_col),
metric_col: 'Number of SKU'}, barmode = "group")
fig.update_traces(marker_line_width=1,marker_line_color="black")
st.write(fig)
def normality_test(df, interval,family_col):
fig = px.scatter(
data_frame=df,
width=800,
height=600,
x='CV',
y='%MAX/MEAN',
color ='NOT_NORMAL',
labels={ "QTY%": 'Percentage of Quantity (%)',
"CV": 'Coefficient of Variation (σ/μ)',
'NOT_NORMAL': 'Distribution is not normal'})
# ABC
n_sku = len(df)
n_a, n_b = int(0.05*n_sku), int(0.5*n_sku)
# A, B, C on turnover
to_a, to_b = df[df['SKU_ID']==n_a]['QTY%'].max(), df[df['SKU_ID']==n_b]['QTY%'].max()
# A, B and C
fig.add_vline(to_a , line_width=1, line_dash="dot", line_color="red")
fig.add_vline(to_b , line_width=1, line_dash="dot", line_color="red")
# CV = 1
fig.add_hline(1 , line_width=1, line_dash="dot", line_color="black")
st.write(fig)
st.markdown(
"""
💡_For all the items that are normally distributed you can use a set of mathematical formula to estimate the minimum safety stock and build inventory management rules
to meet your targets of cycle service level.
([More details in this Article](https://towardsdatascience.com/inventory-management-for-retail-stochastic-demand-3020a43d1c14))_
""")
def distribution(df_abc, df, date_col, sku_col, metric_col):
# List of items with the lowest CV
LIST_LOW = list(df_abc.sort_values(['CV'], ascending = True)[sku_col].values[0:3])
LIST_DAYS = list(df[date_col].unique())
col1, col2 = st.beta_columns(2)
with col1:
item_low = st.selectbox("TOP 3 SKU WITH THE LOWEST CV",index= 0, options =LIST_LOW,key="date")
# ABC @ ITEM-LEVEL
# Item with Low CV
df_dist = df[df[sku_col].isin(LIST_LOW)].copy()
# ABC SKU-LEVEL
df_dist = pd.DataFrame(df_dist[[sku_col, date_col, metric_col]]
.groupby([sku_col, date_col]).sum()).reset_index()
df_dist = df_dist[df_dist[sku_col]==item_low]
# Simple histogram
fig = px.histogram(data_frame=df_dist,
width=800,
height=600,
x=metric_col,
labels={ metric_col: 'Sales Volume per Day (Units/Day)',
"count": 'Number of Days'})
fig.update_traces(marker_line_width=1,marker_line_color="black")
st.write(fig)
st.markdown(
'''
_Can you visually confirm if we can assume that the sales of the **item {}** are distributed normally?_
'''.format(item_low)) |
from __future__ import unicode_literals
from django.http import JsonResponse
from django.http import Http404
from django.shortcuts import render
def index(request):
return render(request, 'index.html')
def health(request):
state = {"status": "UP"}
return JsonResponse(state)
def handler404(request):
return render(request, '404.html', status=404)
def handler500(request):
return render(request, '500.html', status=500)
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
##
## instdep.py
##
## Created on: Apr 19, 2020
## Author: Alexey Ignatiev
## E-mail: [email protected]
## Taken from: https://github.com/rjungbeck/pysat/blob/master/instdep.py
#
#==============================================================================
import argparse
import os
import platform
import re
import requests
import sys
#
#==============================================================================
suffixes = {
(2, 7, '32bit'): None,
(3, 6, '32bit'): 'cp36-cp36m-win32',
(3, 7, '32bit'): 'cp37-cp37m-win32',
(3, 8, '32bit'): 'cp38-cp38-win32',
(2, 7, '64bit'): None,
(3, 6, '64bit'): 'cp36-cp36m-win_amd64',
(3, 7, '64bit'): 'cp37-cp37m-win_amd64',
(3, 8, '64bit'): 'cp38-cp38-win_amd64'
}
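# Example (illustrative): on 64-bit CPython 3.8 the lookup key below is (3, 8, '64bit'),
# which selects the 'cp38-cp38-win_amd64' wheel among the release assets; Python 2.7 maps
# to None, so no wheel is installed for it.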
#
#==============================================================================
def pycall(cmd):
fullcmd = sys.executable + ' ' + cmd
print(fullcmd)
os.system(fullcmd)
#
#==============================================================================
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Dependency installation',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--release-url', type=str, default='https://api.github.com/repos/rjungbeck/pypblib/releases/latest',
help='Release URL')
params = parser.parse_args()
req = requests.get(params.release_url)
rsp = req.json()
version = sys.version_info[:2]
architecture = platform.architecture()[0]
wheelId = (version[0], version[1], architecture)
suffix = suffixes[wheelId]
print('Suffix', suffix)
for asset in rsp['assets']:
print(asset['name'])
if suffix and suffix in asset['name']:
pycall('-m pip install --upgrade {0}'.format(asset['browser_download_url']))
|
# plotting.py - plotting utilities
# ASS, 21 Apr 2020
#
# This file contains some utility functions for plotting CRNs
#
# Copyright (c) 2018, Build-A-Cell. All rights reserved.
# See LICENSE file in the project root directory for details.
import math
import random
import statistics
from warnings import warn
from .components_basic import Protein, DNA, RNA
from .dna_part_cds import CDS
from .dna_part_misc import IntegraseSite, Origin, Operator, UserDefined
from .dna_part_promoter import Promoter
from .dna_part_rbs import RBS
from .dna_part_terminator import Terminator
from .propensities import MassAction
from .species import ComplexSpecies, Species
from .polymer import OrderedPolymer
from .dna_construct import Construct
from .utils import member_dictionary_search
import io
import base64
import copy
HAVE_MATPLOTLIB = False
try:
import matplotlib.pyplot as plt
from matplotlib import cm
HAVE_MATPLOTLIB = True
except ModuleNotFoundError:
pass
PLOT_DNA = False
try:
import dnaplotlib as dpl
PLOT_DNA = True
except ModuleNotFoundError:
pass
if(PLOT_DNA and not HAVE_MATPLOTLIB):
PLOT_DNA = False
PLOT_NETWORK = False
try:
import networkx as nx
from bokeh.models import (BoxSelectTool, Circle, EdgesAndLinkedNodes,
HoverTool, MultiLine, NodesAndLinkedEdges,
PanTool, Plot, Range1d, Square, TapTool,
WheelZoomTool)
from bokeh.plotting import from_networkx
from bokeh.palettes import Spectral4
from bokeh.io import export_svgs, output_notebook
from fa2 import ForceAtlas2
PLOT_NETWORK = True
except ModuleNotFoundError:
pass
empty_set_base64 = 'iVBORw0KGgoAAAANSUhEUgAAADcAAABACAYAAAC+/O8/AAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAAlwSFlzAAAOxAAADsQBlSsOGwAAAAZiS0dEAP8A/wD/oL2nkwAAACV0RVh0ZGF0ZTpjcmVhdGUAMjAxOS0wNi0yOVQxMjo0Mjo1MyswODowMLVKQ5EAAAAldEVYdGRhdGU6bW9kaWZ5ADIwMTYtMDQtMjNUMDA6NDA6MjErMDg6MDD8dsOAAAAAVHRFWHRzdmc6YmFzZS11cmkAZmlsZTovLy9ob21lL2RiL3N2Z19pbmZvL3N2Zy85My8xNy85MzE3YzE3MDc3MWRkYjhkMjA1ZGI0ZDQyMDBkZTA5MS5zdmcgfPYlAAAJH0lEQVRoQ92bV2gVzxfHJ5ZEY6zR2LEm1mhiLCSiokLsqGB50DwIPojlxUIUjQjBrsResSIW7BVEUESCsUWwEhuxxxZ7L/Ob7+TsvXd3z957k7v5Q/4fOGZ298yce3Z3zs6cGcOkQvwfMHHiRBETEyPmzZtHZxRwrqyzZcsWPCAtffv2lc+ePdPny7xzFy5ckFFRUR7nIG3btpUfPnyQZfq1fPXqlWjatKn48eMHnfGSmJgoyqxzX79+1X3s27dvdMaMenKl49z79+/Fw4cPxa9fv8T9+/dFQUGBNvbz509RqVIlUb16dVG/fn0RFxcnKlSoIGJjY/W54pCcnCxycnLoyEvlypXFkSNHRGpqqnpBXWT37t1yzpw5ulOjH4SFhXn6ASe4XrVqVdmvXz+ZkZEhDx48SC35Z/bs2Wx7EFwzCNm5wsJCuXLlStmhQwcZHh7OGgxW1F2X8fHxctu2bVI9dbJgZtWqVWxdCG6qLyV2TnViuWPHDqleM9ZQqBIdHS2PHz8u//79SxalPHDgAKsLUYGFtLyUyLkTJ07oO8wZcVu6d+8ur127Jm/fvs1eh1SrVk3m5+fTr/NSbOfWrFnDGnASFTB0v0IfhKAcGRkZsD/6Cuo1btyYvYZXedeuXfTrzAQdLRHpVGcVy5cvpzPONGrUSHTq1EkkJCSIlJQUUatWLfHv3z99TTmFGyqePHkibty4oeXKlSvi5cuX+npxWbRokUhPT6cjC3AuEOpbIgcNGmS7a1Zp37693Ldvn3z+/LlUzlBt/0BPOSr37Nkj27Rpw7brJDNmzKBWeAI69/v3bzlgwAC2cUMw3EHnd4PDhw/r9jg7vpKWlkY1nAno3OTJk9nGDcH3qTRo2bIla8+QadOmkaYzfp3buHEj2zBEjTDkzp07SdNdEB2tg2FOtm7dSjV4HJ07e/asDrFco4h2Dx48IE13UYFFNmnShLVrlTp16kgVjKimHdY5FRn1kIhrsGLFiqTlPggsLVq0YO06yYgRI6i2HdY5pyEOvk25ubmk5S4Y8QwePJi1C0FkVINr9tqGDRuoFTM25969e+fYmTGGLC2mTJnC2oTAaaC+aex1jJa+f/+udXyxObd582a2gWHDhsk/f/6QlrusW7eOtQnp3bu3/PTpk9Z7+/at7NatG6uHVIMVm3MYHlkr4nU4f/48abgLPvpWe4Z07txZO+QLxrUIaFZdDM+smJxDCLZWgowaNYo03CU7O1vWrFmTtYmb7BSRBw4cyNYxEkMGJucQebhKd+7cIQ33wA/3N7NQs2nStIObwtUZO3YsaRRhco4Lw7169aKr7tK1a1ebLUOWLl1KWs7ExcXZ6uGcLx7nTp06JatUqWKrgAmp2zi9VpBghlVg2bJltrpq9iEvX75MGj7OOeUl1JSENNwBP56zAxk/fjxpBSYnJ4dtY+7cuaTh49ykSZNsipjCuMmmTZtkuXLlbHYgycnJsqCggDQD8/HjR9mwYUNbO1OnTiUNKcupEzoH+PTpUxRNREdHUyl0rl69KtQowzNp9QXpvUOHDom6devSmcCoca9o0KABHXlREVOoaZoua+fUR1Jnb62oeRWVQqdLly5C3W06MnPp0iVRr149OgoeNXCmkpfCwkLx5csXXdbOIYmqhl36hC/FuZP+QGrBCTxRpCRKQqtWrajkBc7hTQTaOWSGuXw7ssOhoIZrYujQoXRkJzMzUyQlJdFR8VEfeip5wWsPu0A7p/qeFivly5enUsnAWtmxY8foyMzo0aPFzJkz6cg98JaooKXL+t/w8HARERGhT/hivLslAa/b/Pnz6cgMAsjevXv131DgFkHgh5pz6rJ2rkaNGmxkRKApCVigQADhwJ01olmo5OXlUckL/IiKitJl7RxWWLhodevWLSoFD0IxVmA4YLgkbTrx5s0bKnnBKo/pycFTJFKtcJX9AceGDx9OR2bQf7Oyslz7vCAiYmnMivqwewJhUc9TqKkHlbyo2YBQo3c6CgwCBPoaR0ZGhkhLS6Oj0Ll7967Iz8+nIy+mdT49TlFgbYxbsVEjB9LwD4Y91rqGjBw5krTcY+3atTY7Knbo6ZCBxznQrFkzWwWM4AOxYMECWz1DSmvKlJCQYLMVGxtLV4swOde/f39bBQhSbk74WzNTEVM+fvyYNN0DuUrOnvp2kkYRJudOnz7NVnLKy9+8edNx8TEmJkZev36dNN0FySrOpuqHpFGEyTnAVapdu7Yts4vMcGJiIqsPCbavFhc8AC7VjpVYKzbn1q9fb6sIGTNmDGkUzaX69OnD6kGw8F8aIHHrtOKElKQVm3N4Ik6rmEbKwSnVDklPT9c6pQFyK5xNJJGx8cCKzTmwcOFCthGsm/kL+ejQvgv0bnLmzBnWJmTFihWkZYZ1DvumevTowTbkJAjDWFEtLTibkNTUVP26crDOAaxwYrGea9AqeC0+f/5MNd0FiWKn34FsHZ6oE47OATVlYRv1FayRuZ0hM8DiIqIgZxeCnRX+8OsccPqwG5KSkkKa7oJtVpw9Q8aNG0eazgR0LphdDEjLufX08JohpcjZMQQbb5z6mS9+nZs+fTrbuJMgCOHjDcPBRk1s1UAY379/v17V4dr1FYxVEfCCwXGTzeLFi0uc4+jYsaNo3bq16Nmzp4iPj9fzK5jBnE45rWfjmI/l5uaKixcv6o029+7do9rOTJgwQSxZsoRNDLHAOSsYYWDbES5bJZhdBr6CsSeWo7B5AMtVKEMiIiJYfSdZvXo1/brgsTmHbxUWFDgDEITmkydPsqns0hAMHI4ePUq/rnjYnMPWPs4IBNMbA7z3WCMv7hMIVvDEt2/frrdmlRSTc/7GjFlZWaRlBmNR1Tdd26KISImFfWw8CBWPc5mZmawxyKxZs0jLP5hRYEsFHA12dAO9pKQkvYSGVSA30dHy3LlzYsiQIZ4cuy/YUpidnU1HwYHEDTJTyJ4hCr548UKovqwjJdJuyLRhhQYbt5HuwzGXfQuVMMzNnHaGR0ZGitevXws1hqMzoaHuo99FEbcpp772VDSDb9OjR49ccwz8Lx3TYNjUvHlzUz/Atwz/jaSsowNKXl6
ebNeuncc5bspeFjENv5AKx5qBinp0piwjxH8G5Zz6mESqhwAAAABJRU5ErkJggg=='
def updateLimits(limits, xvalues):
for value in xvalues:
if(value < limits[0]):
limits[0] = value
if(value > limits[1]):
limits[1] = value
return limits
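# e.g. updateLimits([0, 0], [-3, 5]) widens the limits in place and returns [-3, 5]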
def makeArrows2(graph_renderer, graph, positions, headsize=3, headangle=math.pi/6, make_arrows = True):
"""this function draws an arrow shape at the end of graph lines"""
xs, ys = [], []
xbounds = [0, 0]
ybounds = [0, 0]
for edge in graph.edges:
# iterate through all the edges
from_node = edge[0]
to_node = edge[1]
from_x = positions[from_node][0]
from_y = positions[from_node][1]
to_x = positions[to_node][0]
to_y = positions[to_node][1]
updateLimits(xbounds, [from_x, to_x])
updateLimits(ybounds, [from_y, to_y])
# above, we get all the variables
# below, we are assuming the "to" position is the middle of the
# coordinate space
ydif = from_y-to_y
xdif = from_x-to_x
# next we calculate the angle from the destination node
# to the source node
angl = math.atan2(ydif, xdif)
# the arrow consists of three added points, one on either side
# of the line and one in the middle
p1x = to_x+headsize*math.cos(angl+headangle) # left side of the arrow
p1y = to_y+headsize*math.sin(angl+headangle) # left side of the arrow
p2x = to_x+headsize*math.cos(angl-headangle) # right side of the arrow
p2y = to_y+headsize*math.sin(angl-headangle) # right side of the arrow
p3x = to_x+headsize*.7*math.cos(angl) # middle of the arrow
p3y = to_y+headsize*.7*math.sin(angl) # middle of the arrow
# 'xs' is a list of lists which represent each line from node to node
xs.append([from_x, p3x, p1x, to_x, p2x, p3x])
# 'ys' is the same thing except the y positions
ys.append([from_y, p3y, p1y, to_y, p2y, p3y])
# this part replaces the lines with the ones made by this function
if make_arrows:
graph_renderer.edge_renderer.data_source.data['xs'] = xs
graph_renderer.edge_renderer.data_source.data['ys'] = ys
return xbounds, ybounds
def graphPlot(DG,DGspecies,DGreactions,plot,layout="force",positions=None,plot_species = True, plot_reactions = True, plot_edges = True, plot_arrows = True,\
species_glyph_size = 12, reaction_glyph_size = 8, posscale = 1.0,layoutfunc=None,iterations=2000,rseed=30,show_species_images=False):
"""given a directed graph, plot it!
Inputs:
DG: a directed graph of type DiGraph
DGspecies: a directed graph which only contains the species nodes
DGreactions: a directed graph which only contains the reaction nodes
plot: a bokeh plot object
layout: graph layout function.
'force' uses fa2 to push nodes apart
'circle' plots the nodes and reactions in two overlapping circles, with the reactions on the inside of the circle
'custom' allows user input "layoutfunc". Internally, layoutfunc is passed the three inputs (DG, DGspecies, DGreactions)
and should output a position dictionary with node {<node number>:(x,y)}
positions: a dictionary of node names and x,y positions. this gets passed into the layout function
posscale: multiply the scaling of the plot. This only affects the arrows because the arrows are a hack :("""
random.seed(rseed)
if(not PLOT_NETWORK):
warn("network plotting disabled because some libraries are not found")
return
if(layout == "force"):
# below are parameters for the force directed graph visualization
forceatlas2 = ForceAtlas2(
# Behavior alternatives
outboundAttractionDistribution=True, # Dissuade hubs
linLogMode=False, # NOT IMPLEMENTED
# Prevent overlap (NOT IMPLEMENTED)
adjustSizes=False,
edgeWeightInfluence=1.0,
# Performance
jitterTolerance=1.0, # Tolerance
barnesHutOptimize=True,
barnesHutTheta=1.2,
multiThreaded=False, # NOT IMPLEMENTED
# Tuning
scalingRatio=2.4*posscale,
strongGravityMode=False,
gravity=1.0,
# Log
verbose=False)
positions = forceatlas2.forceatlas2_networkx_layout(
DG, pos=positions, iterations=iterations)
elif(layout == "circle"):
positions = nx.circular_layout(DGspecies, scale=50*posscale)
positions.update(nx.circular_layout(DGreactions, scale=35*posscale))
elif(layout == "custom"):
positions = layoutfunc(DG, DGspecies, DGreactions)
reaction_renderer = from_networkx(DGreactions, positions, center=(0, 0))
species_renderer = from_networkx(DGspecies, positions, center=(0, 0))
edges_renderer = from_networkx(DG, positions, center=(0, 0))
#Set xbounds and ybounds:
xbounds = [0, 0]
ybounds = [0, 0]
for n in positions:
xbounds[0] = min([xbounds[0], positions[n][0]])
xbounds[1] = max([xbounds[1], positions[n][0]])
ybounds[0] = min([ybounds[0], positions[n][1]])
ybounds[1] = max([ybounds[1], positions[n][1]])
max_glyph = max([reaction_glyph_size, species_glyph_size])
xbounds[0] -= max_glyph
xbounds[1] += max_glyph
ybounds[0] -= max_glyph
ybounds[1] += max_glyph
# edges
edges_renderer.node_renderer.glyph = Circle(
size=species_glyph_size, line_alpha=0, fill_alpha=0, fill_color="color")
edges_renderer.edge_renderer.glyph = MultiLine(
line_alpha=0.2, line_width=4, line_join="round", line_color="color")
edges_renderer.edge_renderer.selection_glyph = MultiLine(
line_color=Spectral4[2], line_width=5, line_join="round")
edges_renderer.edge_renderer.hover_glyph = MultiLine(
line_color=Spectral4[1], line_width=5, line_join="round")
if plot_arrows:
xbounds_a, ybounds_a = makeArrows2(
edges_renderer, DG, positions, headsize=5) # make the arrows!
xbounds[0] = min([xbounds[0], xbounds_a[0]])
xbounds[1] = max([xbounds[1], xbounds_a[1]])
ybounds[0] = min([ybounds[0], ybounds_a[0]])
ybounds[1] = max([ybounds[1], ybounds_a[1]])
# we want to find the middle of the graph and plot a square that is 1:1 aspect ratio
# find the midpoint of the graph
xmid = statistics.mean(xbounds)
ymid = statistics.mean(ybounds)
# now, subtract the middle from the edges
xmiddlized = [a-xmid for a in xbounds]
ymiddlized = [a-ymid for a in ybounds]
# now, find the biggest dimension
absdim = max([abs(a) for a in xmiddlized+ymiddlized])
xlim = [xmid-absdim*1.05, xmid + absdim*1.05]
ylim = [ymid-absdim*1.05, ymid + absdim*1.05]
# now set it on the plot!
plot.x_range = Range1d(xlim[0], xlim[1])
plot.y_range = Range1d(ylim[0], ylim[1])
# reactions
reaction_renderer.node_renderer.glyph = Square(
size=reaction_glyph_size, fill_color="color")
reaction_renderer.node_renderer.selection_glyph = Square(
size=reaction_glyph_size, fill_color=Spectral4[2])
reaction_renderer.node_renderer.hover_glyph = Square(
size=reaction_glyph_size, fill_color=Spectral4[1])
# nodes
species_renderer.node_renderer.glyph = Circle(size=12, fill_color="color")
species_renderer.node_renderer.selection_glyph = Circle(size=15, fill_color=Spectral4[2])
species_renderer.node_renderer.hover_glyph = Circle(size=15, fill_color=Spectral4[1])
#this part adds the interactive elements that make it so that the lines are highlighted
#when you mouse over and click
edge_hover_tool = HoverTool(tooltips= None,renderers=[edges_renderer])
if( not show_species_images):
species_hover_tool = HoverTool(tooltips=[("name", "@species"), ("type", "@type")],\
renderers=[species_renderer],attachment="right")
else:
species_hover_tool = HoverTool(tooltips='<div><div> <img src="data:image/png;base64,@image" style="float: left; margin: 0px 0px 0px 0px;"></img></div></div>',\
renderers=[species_renderer],attachment="right")
rxn_hover_tool = HoverTool(tooltips=[("reaction", "@species"), ("type", "@type"),("k_f","@k"),("k_r","@k_r")],\
renderers=[reaction_renderer],attachment="right")
plot.add_tools(edge_hover_tool,species_hover_tool,rxn_hover_tool, TapTool(), BoxSelectTool(),PanTool(),WheelZoomTool())
edges_renderer.selection_policy = NodesAndLinkedEdges()
edges_renderer.inspection_policy = EdgesAndLinkedNodes()
if plot_edges:
plot.renderers.append(edges_renderer)
if plot_reactions:
plot.renderers.append(reaction_renderer)
if plot_species:
plot.renderers.append(species_renderer)
def generate_networkx_graph(CRN,useweights=False,use_pretty_print=False,pp_show_material=True,
pp_show_rates=True,pp_show_attributes=True, pp_show_compartments=True,
colordict=None,reactioncolordict = None, imagedict = None):
"""generates a networkx DiGraph object that represents the CRN.
input:
==========================
CRN: a CRN from mixture.get_model() for example
useweights: this will attempt to represent the reaction rates by the length of edges.
short edges are fast rates. It doesn't look very good usually
use_pretty_print: this uses the "pretty print" function to represent reactions and nodes a bit cleaner
the next three parameters are pretty_print parameters
    pp_show_material: default True; the material type is also shown in the node "type" field
pp_show_rates: default true because this is useful information
pp_show_attributes
colordict: a dictionary containing which node types are what color based upon the following keywords:
Keywords are chosen to match species.material_type
{"complex": "cyan",
"protein": "green",
"dna": "grey",
"rna": "orange",
"ligand": "pink",
"phosphate": "yellow",
"nothing":"purple"}
When using a custom colordict, the following attributes will be checked to find colors with the first keys taking precedence:
repr(species): "color"
species.name: "color"
(species.material_type, tuple(species.attributes)): "color"
species.material_type: "color"
tuple(species.attributes): "color"
imagedict is a dictionary which contains species and their corresponding image representations.
This is the output generated by CRNPlotter.renderMixture()
output:
==================
CRNgraph: the DiGraph object containing all nodes and edges
CRNspeciesonly: a DiGraph object with only species
CRNreactionsonly: a DiGraph object with only reactions
"""
if(not PLOT_NETWORK):
warn("network plotting disabled because some libraries are not found")
return None, None, None
if (colordict is None):
colordict = {"complex": "cyan", "protein": "green",
"dna": "grey", "rna": "orange",
"ligand": "pink", "phosphate": "yellow", "nothing": "purple"}
CRNgraph = nx.DiGraph()
allnodenum = 1 # every node has an index
alledgenum = 0 #every edge has an index
# this starts at 1 because "nothing" is node 0
nodedict = {} # this is so that we can write out the reactions in
# the reaction "species" field
# it has {species:index}
rxnlist = [] # list of numbers corresponding to only reaction nodes
# sometimes reactions have no products. I want this to be represented in the graph with this
# "nothing" node. However, usually we are making degradation reactions which yield the
# degradation enzyme, so then it doesn't go to nothing. This means actually this node
    # isn't used for anything. But I think it's good to have just in case.
default_species_color = "grey"
if (reactioncolordict is None or 'reaction' not in reactioncolordict) and 'reaction' not in colordict:
default_reaction_color = "cornflowerblue"
elif 'reaction' in colordict:
default_reaction_color = colordict['reaction']
else:
default_reaction_color = reactioncolordict['reaction']
if (reactioncolordict is None or 'edge' not in reactioncolordict) and 'edge' not in colordict:
default_edge_color = 'gray'
elif 'edge' in colordict:
default_edge_color = colordict['edge']
else:
default_edge_color = reactioncolordict['edge']
nodedict["nothing"] = 0
CRNgraph.add_node(0)
CRNgraph.nodes[0]["type"] = "nothing"
CRNgraph.nodes[0]["species"] = "nothing"
CRNgraph.nodes[0]["image"] = empty_set_base64
if("nothing" in colordict):
CRNgraph.nodes[0]["color"] = colordict["nothing"]
for species in CRN.species:
# add all species first
species_color = member_dictionary_search(species,colordict)
if(species_color is None):
species_color = default_species_color
nodedict[species] = allnodenum
CRNgraph.add_node(allnodenum)
CRNgraph.nodes[allnodenum]["type"]=str(species.material_type)
if((imagedict is not None) and (species in imagedict)):
CRNgraph.nodes[allnodenum]["image"]= imagedict[species].decode()
if(not use_pretty_print):
CRNgraph.nodes[allnodenum]["species"] = str(species)
else:
spectxt = species.pretty_print(
show_material=pp_show_material, show_compartment=pp_show_compartments)
CRNgraph.nodes[allnodenum]["species"] = spectxt
CRNgraph.nodes[allnodenum]["color"] = species_color
allnodenum += 1
# reactions follow, allnodenum is not reset between these two loops
for rxn in CRN.reactions:
CRNgraph.add_node(allnodenum)
CRNgraph.nodes[allnodenum]["type"] = str(rxn.propensity_type)
if isinstance(rxn.propensity_type, MassAction):
CRNgraph.nodes[allnodenum]["k"] = str(
rxn.propensity_type.k_forward)
CRNgraph.nodes[allnodenum]["k_r"] = str(
rxn.propensity_type.k_reverse)
elif hasattr(rxn.propensity_type, "k"):
CRNgraph.nodes[allnodenum]["k"] = str(rxn.propensity_type.k)
CRNgraph.nodes[allnodenum]["k_r"] = ''
else:
CRNgraph.nodes[allnodenum]["k"] = ''
CRNgraph.nodes[allnodenum]["k_r"] = ''
reaction_color = member_dictionary_search(rxn,reactioncolordict)
if(reaction_color is None):
reaction_color = default_reaction_color
# CRNgraph.nodes[allnodenum]
if isinstance(rxn.propensity_type, MassAction):
kval = rxn.propensity_type.k_forward
CRNgraph.nodes[allnodenum]["k"] = str(kval)
elif hasattr(rxn.propensity_type, "k"):
kval = rxn.propensity_type.k
CRNgraph.nodes[allnodenum]["k"] = str(rxn.propensity_type.k)
else:
CRNgraph.nodes[allnodenum]["k"] = ''
if(not useweights):
kval = 1
if isinstance(rxn.propensity_type, MassAction):
krev_val = rxn.propensity_type.k_reverse
else:
krev_val = None
if((krev_val is not None) and (not useweights)):
krev_val = 1
        # Set edge color once per reaction (it is also needed below when the reaction
        # has no inputs or no outputs)
        edge_color = member_dictionary_search(rxn,reactioncolordict)
        if edge_color is None:
            edge_color = default_edge_color
        for reactant in rxn.inputs:
            CRNgraph.add_edge(nodedict[reactant.species],allnodenum,weight=kval)
            CRNgraph.edges[nodedict[reactant.species],allnodenum]['color'] = edge_color
if(krev_val is not None):
# if the k is 0 then the node does not exist, right?
CRNgraph.add_edge(
allnodenum, nodedict[reactant.species], weight=krev_val)
CRNgraph.edges[allnodenum, nodedict[reactant.species]]['color'] = edge_color
for product in rxn.outputs:
#TODO species cannot find another species in the nodedict????
CRNgraph.add_edge(allnodenum,nodedict[product.species],weight=kval)
CRNgraph.edges[allnodenum,nodedict[product.species]]['color'] = edge_color
if(krev_val is not None):
CRNgraph.add_edge(
nodedict[product.species], allnodenum, weight=krev_val)
CRNgraph.edges[nodedict[product.species], allnodenum]['color'] = edge_color
if(len(rxn.outputs) == 0):
# this adds an edge to the "nothing" node we made in the beginning
CRNgraph.add_edge(allnodenum, 0, weight=kval)
CRNgraph.edges[allnodenum, 0]['color'] = edge_color
if(krev_val is not None):
CRNgraph.add_edge(0, allnodenum, weight=krev_val)
CRNgraph.edges[0, allnodenum]['color'] = edge_color
elif(len(rxn.inputs) == 0):
# this adds an edge from the "nothing" node we made in the beginning
CRNgraph.add_edge(0, allnodenum, weight=kval)
CRNgraph.edges[0, allnodenum]['color'] = edge_color
if(krev_val is not None):
CRNgraph.add_edge(allnodenum, 0, weight=krev_val)
CRNgraph.edges[allnodenum, 0]['color'] = edge_color
CRNgraph.nodes[allnodenum]["color"] = reaction_color
if(not use_pretty_print):
CRNgraph.nodes[allnodenum]["species"] = str(rxn)
else:
rxntxt = rxn.pretty_print(
show_material=pp_show_material, show_rates=pp_show_rates, show_attributes=pp_show_attributes)
# this will show up as "reaction" in the tool tip
CRNgraph.nodes[allnodenum]["species"] = rxntxt
# the name of the reaction is the string representation
rxnlist += [allnodenum]
allnodenum += 1
CRNspeciesonly = CRNgraph.copy()
CRNspeciesonly.remove_nodes_from(rxnlist)
CRNreactionsonly = CRNgraph.copy()
    CRNreactionsonly.remove_nodes_from(range(rxnlist[0] if rxnlist else allnodenum))
return CRNgraph, CRNspeciesonly, CRNreactionsonly
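# Illustrative usage (a sketch; assumes `crn` is a compiled CRN, e.g. from mixture.compile_crn()):
#   CRNgraph, species_only, reactions_only = generate_networkx_graph(crn)
#   plot = Plot()  # a bokeh Plot; x/y ranges and tools are configured inside graphPlot
#   graphPlot(CRNgraph, species_only, reactions_only, plot)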
class CRNPlotter:
class MultiPart:
def __init__(self,name,parts_list,bound = None):
"""multiple simple parts which are treated as one"""
self.name = name
self.parts_list = parts_list
self.bound = None
def get_directed(self,direction,bound=None,non_binder=None):
"""returns a copy of itself with the direction changed to the value of 'direction'.
In the case of MultiPart it also means reversing the order of the subparts.
A MultiPart binds to things differently from a normal part. the binding is distributed among
the subparts. "non_binder" indicates a dpl_type which should not be shown binding to things."""
if(non_binder is None):
#by default we assume that promoters that are part of MultiParts don't bind to things
#that's because MultiPart is currently only used for repressible Promoters with Operators
non_binder = ["Promoter"]
new_multipart = copy.deepcopy(self)
bound_for_distribution = None
if(bound is not None):
bound_for_distribution = copy.copy(bound)
elif(self.bound is not None):
bound_for_distribution = copy.copy(self.bound)
if(bound_for_distribution is not None):
#distribute the "bound" items among the parts contained within the MultiPart
recursion = 10
while(len(bound_for_distribution)>0 and recursion > 0):
for part in new_multipart.parts_list:
if(part.dpl_type not in non_binder):
if(part.bound is None):
part.bound = [bound_for_distribution.pop(0)]
else:
part.bound += [bound_for_distribution.pop(0)]
if(len(bound_for_distribution) == 0):
break
recursion -= 1
if(recursion == 0):
#this is likely to happen if everything in the multipart is marked as "non binding"
raise ValueError(f"reached maximum recursion when trying to populate multipart {self}")
#actually changing the direction of each part inside the MultiPart
new_multipart.parts_list = [a.get_directed(direction) for a in new_multipart.parts_list]
if(direction=="reverse"):
new_multipart.parts_list = new_multipart.parts_list[::-1]
return new_multipart
else:
return new_multipart
def get_dpl(self):
"""dnaplotlib takes these dictionaries as input. So this converts the MultiPart object
into a list of dictionaries that matplotlib can understand"""
outlist = []
for part in self.parts_list:
outlist+= part.get_dpl()
return outlist
def __repr__(self):
return "MultiPart("+",".join([str(a) for a in self.parts_list])+")"
class SimpleConstruct:
def __init__(self,name,parts_list,circular=False,material_type="dna",label_size=13,added_opts = None):
"""this is a simplified version of a DNAconstruct which mostly only has information relevant to plotting"""
self.name = name
self.parts_list = parts_list
self.circular = circular
self.material_type = material_type
self.label_size = label_size
            if(added_opts is None):
                self.added_opts = {}
            else:
                self.added_opts = added_opts
def get_dpl(self):
"""output a list of dictionaries suitable for dnaplotlib"""
outlist = []
for part in self.parts_list:
part_dpl = part.get_dpl()
for subpart_dpl in part_dpl:
subpart_dpl["opts"].update(self.added_opts)
outlist+= part_dpl
return outlist
def get_dpl_binders(self):
"""output a dnaplotlib dictionary list to represent the "binders".
Binders are "regulation" arcs modified to draw a SBOL glyph instead of a line. """
my_dpl_output = self.get_dpl()
out_regs = []
for design in my_dpl_output:
if('make_binders' in design):
for binder in design['make_binders']:
linecolor = 'blue'
if(binder.material_type=='dna'):
linecolor = 'black'
elif(binder.material_type=='rna'):
linecolor = 'red'
if(hasattr(binder,"color")):
bindcolor = binder.color
else:
bindcolor = (0.5,0.5,0.5)
out_reg = {'type':'Binding', 'from_part':design, 'to_part':design,
'opts':{'label':binder.name,'label_size':self.label_size*.7,\
'color':linecolor, 'label_x_offset':-1,'y_offset':10,\
'face_color':bindcolor}
}
out_reg['opts'].update(binder.added_opts)
out_regs+=[out_reg]
return out_regs,my_dpl_output
def renderDNA(self, dna_renderer,ax=None,plot_backbone=True):
part_renderers = dna_renderer.SBOL_part_renderers()
reg_renderers = dna_renderer.std_reg_renderers()
if(ax is None):
figsize = (1,1)
#fig,ax = plt.subplots(constrained_layout=True)
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([0,0,1,1])
plt.tight_layout(pad=0.0001)
my_regs,my_designs = self.get_dpl_binders()
for part in my_designs:
part['opts'].update({'edgecolor':dna_renderer.linecolor})
start,end = dna_renderer.renderDNA(ax,my_designs,part_renderers,circular=self.circular,\
regs=my_regs, reg_renderers=reg_renderers,plot_backbone=plot_backbone)
fig = ax.get_figure()
ax.axis('off')
ylimits = [None,None]
xlimits = [None,None]
relevant_stuff = ax.patches+ax.texts
for patch in relevant_stuff:
bbox = patch.get_window_extent(renderer=fig.canvas.get_renderer())
if(ylimits == [None,None]):
ylimits = [bbox.ymin,bbox.ymax]
if(bbox.ymax > ylimits[1]):
ylimits[1] = bbox.ymax
if(bbox.ymin < ylimits[0]):
ylimits[0] = bbox.ymin
if(xlimits == [None,None]):
xlimits = [bbox.xmin,bbox.xmax]
if(bbox.xmax > xlimits[1]):
xlimits[1] = bbox.xmax
if(bbox.xmin < xlimits[0]):
xlimits[0] = bbox.xmin
xlimits[0],ylimits[0] = fig.transFigure.inverted().transform((xlimits[0], ylimits[0]))
xlimits[1],ylimits[1] = fig.transFigure.inverted().transform((xlimits[1], ylimits[1]))
ax.relim()
yheight = ylimits[1]-ylimits[0]
xheight = xlimits[1]-xlimits[0]
fig.set_size_inches(xheight/24,yheight/24)
ax.set_aspect('equal')
ax.autoscale_view()
return ax
class SimplePart:
def __init__(self,name,dpl_type,direction='forward',bound=None,color=None,\
color2=None,show_label=True,label_size = 13,label_y_offset = -8,added_opts=None,material_type=None):
"""a simple 'part' for the sole purpose of rendering using DNAplotlib"""
self.name = name
self.color = color
self.color2 = color2
self.dpl_type = dpl_type #this is a string which dnaplotlib knows about
self.direction=direction #"forward" or "reverse"
self.bound = bound #this should be a list of SimpleParts
if(added_opts is None):
self.added_opts = {}
else:
self.added_opts = added_opts #dictionary of keywords for dnaplotlib
self.show_label =show_label #if the label should be added to the 'opts' upon output
self.label_size = label_size #font size of the label
self.label_y_offset = label_y_offset
self.material_type = material_type
def get_directed(self,direction,bound=None):
copied_part = copy.copy(self)
copied_part.direction = direction
if(bound is not None):
copied_part.bound=bound
elif(self.bound is not None):
copied_part.bound = self.bound
return copied_part
def get_dpl(self,bound=None):
direction = True
if(self.direction == "reverse"):
direction = False
dpl_out = {'type':self.dpl_type, 'name':self.name, 'fwd':direction}
opts = {'color':self.color,'color2':self.color2}
if(self.show_label):
opts['label']=self.name
opts['label_size']=self.label_size
opts['label_y_offset']=self.label_y_offset
opts.update(self.added_opts)
dpl_out['opts']=opts
if(bound is not None):
dpl_out['make_binders']=bound
elif(self.bound is not None):
dpl_out['make_binders']=self.bound
return [dpl_out]
def __repr__(self):
return "SimplePart("+str(self.name)+"-"+str(self.direction)[0]+")"
def __init__(self,dna_renderer=None,rna_renderer=None,cmap = "Set3",colordict=None):
if(dna_renderer is None):
self.dna_renderer=dpl.DNARenderer(scale = 5,linewidth=3)
else:
self.dna_renderer = dna_renderer
if(rna_renderer is None):
self.rna_renderer=dpl.DNARenderer(scale = 5,linewidth=3,linecolor=(1,0,0))
else:
self.rna_renderer = rna_renderer
if(colordict is None):
colordict = {}
self.colordict = colordict
if(isinstance(cmap,str)):
self.cmap = plt.get_cmap(cmap).colors
elif(isinstance(cmap,list)):
self.cmap = cmap
else:
self.cmap = None
self.color_counter = 0
self.clear_dicts()
def renderMixture(self,mixture,crn = None,rna_renderer=None,dna_renderer=None,store=True,output=None,recursion_depth = 4,compiled_components=None):
"""creates dnaplotlib images for all relevant species in a mixture"""
if(crn is None):
mycrn,compiled_components = mixture.compile_crn(return_enumerated_components = True,
initial_concentrations_at_end = True,
copy_objects = False,
add_reaction_species = False,recursion_depth=recursion_depth)
else:
mycrn = crn
        if(rna_renderer is None):
            if(self.rna_renderer is not None):
                rna_renderer = self.rna_renderer
            else:
                raise ValueError("rna_renderer cannot be None")
        if(dna_renderer is None):
            if(self.dna_renderer is not None):
                dna_renderer = self.dna_renderer
            else:
                raise ValueError("dna_renderer cannot be None")
self.clear_dicts()
if(compiled_components is None):
#this only happens if CRN and mixture are both given
_,compiled_components = mixture.compile_crn(return_enumerated_components = True,
initial_concentrations_at_end = True,
copy_objects = False,
add_reaction_species = False,recursion_depth=recursion_depth)
for component in compiled_components:
if(isinstance(component,Construct)):
a = self.make_dpls_from_construct(component)
plt.ioff()
for species in mycrn.species:
a = self.make_dpl_from_species(species)
if(a.material_type is not None):
plot_bb = True
if(not isinstance(a,self.SimpleConstruct)):
plot_bb = False
#if we aren't looking at a construct then don't plot the backbone
newcon = self.SimpleConstruct(a.name,[a],material_type=a.material_type)
else:
newcon = a
if(newcon.material_type=='rna'):
ax = newcon.renderDNA(rna_renderer,plot_backbone=plot_bb)
else:
ax = newcon.renderDNA(dna_renderer,plot_backbone=plot_bb)
if(store):
#this part converts the matplotlibplot into a base64-encoded image
imagestream = io.BytesIO()
fig = ax.get_figure()
if(output is not None):
fig.savefig(output+"_"+str(species).replace("_","").replace("forward","f").replace("reverse","r").replace("part","")+".pdf",bbox_inches='tight')
fig.savefig(imagestream,bbox_inches='tight')
plt.close()
png_str = base64.b64encode(imagestream.getvalue())
self.species_image_dict[species]= png_str
return self.species_image_dict
def renderConstruct(self,construct_obj,rna_renderer=None,dna_renderer=None,showlabels=True, render_rna=False):
        if(rna_renderer is None):
            if(self.rna_renderer is not None):
                rna_renderer = self.rna_renderer
            else:
                raise ValueError("rna_renderer cannot be None")
        if(dna_renderer is None):
            if(self.dna_renderer is not None):
                dna_renderer = self.dna_renderer
            else:
                raise ValueError("dna_renderer cannot be None")
a = self.make_dpls_from_construct(construct_obj)
outaxs = [a.renderDNA(dna_renderer)]
if(render_rna):
components = construct_obj.enumerate_constructs()
for component in components:
if(isinstance(component,RNA)):
a = self.make_dpls_from_construct(component)
outaxs += [a.renderDNA(rna_renderer)]
return outaxs
def clear_dicts(self):
self.part_dpl_dict = {}
self.construct_dpl_dict = {}
self.species_dpl_dict = {}
self.species_image_dict ={}
def get_color(self):
if(self.cmap is not None):
out_color = self.cmap[self.color_counter]
self.color_counter+=1
if(self.color_counter>=len(self.cmap)):
self.color_counter = 0
return out_color
else:
raise ValueError("No colormap set")
def make_dpls_from_construct(self,construct,save_in_dict=True):
if(construct in self.construct_dpl_dict):
return self.construct_dpl_dict[construct]
else:
new_parts_list = []
for part in construct:
new_parts_list += [self.make_dpl_from_part(part,save_in_dict=save_in_dict)]
if(isinstance(construct,DNA)):
mat_type = "dna"
elif(isinstance(construct,RNA)):
mat_type = "rna"
simple_construct = self.SimpleConstruct(name = construct.name,\
parts_list=new_parts_list,\
circular=construct.circular,\
material_type = mat_type)
if(save_in_dict):
self.construct_dpl_dict[construct]=simple_construct
return simple_construct
def make_dpl_from_species(self,species):
if(species in self.species_dpl_dict):
return self.species_dpl_dict[species]
else:
if(isinstance(species,OrderedPolymer)):
#we are dealing with a polymer
polylist = [] #accumulating the list of SimpleParts
for monomer in species:
#going through the monomers
removed_monomer = monomer.get_removed()
if(removed_monomer in self.species_dpl_dict):
#if we already know about this monomer, just use that
polylist += [self.species_dpl_dict[removed_monomer].get_directed(monomer.direction)]
elif(isinstance(monomer,ComplexSpecies)):
#if the monomer is a complex that means we have to make a bound simplepart
binders = []
base_simplepart = None #we must figure out who the base is. This is how we do that
for specie in monomer.get_species(recursive=True):
if(isinstance(specie,ComplexSpecies)):
continue
if(specie.material_type=='part'):
#this material type is only made by dna constructs
base_simplepart = copy.copy(self.make_dpl_from_species(specie)) #copy it because now we make it bound to stuff
#ideally you already ran make_dpl_from_construct and so this will be populated
else:
binders += [self.make_dpl_from_species(specie)] #other stuff gets added as a binder
base_simplepart.bound = binders
self.species_dpl_dict[removed_monomer]=base_simplepart
polylist += [base_simplepart.get_directed(monomer.direction)]
elif(isinstance(monomer,Species)):
#in this case we couldnt find the monomer in our dictionary, but it is
#at the base level
base_simplepart = self.SimplePart(monomer.name,'UserDefined',color=self.get_color()) #new simplepart
self.species_dpl_dict[removed_monomer]=base_simplepart #adding it to the dictionary for the future
polylist += [base_simplepart.get_directed(monomer.direction)] #add it to the polymer, with a direction
out_dpl = self.SimpleConstruct(name = species.name,parts_list = polylist,circular=species.circular,material_type = species.material_type)
elif(isinstance(species,ComplexSpecies)):
#if we have a complex but it is not a polymer, we just do the "binding" determination of the polymer part
base_simplepart = None
binders = []
for specie in species.species:
#i don't really care who is the base of this thing
if(base_simplepart is None):
base_simplepart = self.make_dpl_from_species(specie)
else:
binders += [self.make_dpl_from_species(specie)]
out_dpl = base_simplepart.get_directed(None,binders)
elif(isinstance(species,Species)):
#this happens if we have another type of species
dpl_type = 'Origin' #default is 'origin', which is just a circle
if(species.material_type=='dna'):
dpl_type = 'UserDefined'
elif(species.material_type=='rna'):
dpl_type = 'NCRNA'
out_dpl = self.SimplePart(name = species.name,dpl_type = dpl_type,color=self.get_color(),material_type=species.material_type)
self.species_dpl_dict[species] = out_dpl
return out_dpl
def make_dpl_from_part(self,part,set_color=None,save_in_dict=True,set_color2=None):
"""create a DNAplotlib dictionary from a part"""
removed_part = part.get_removed()
if(len(self.colordict)>0 and set_color is None):
search_color = member_dictionary_search(removed_part,self.colordict)
if(search_color is not None):
set_color = search_color
if(removed_part in self.part_dpl_dict):
return self.part_dpl_dict[removed_part].get_directed(part.direction)
else:
dpl_type= "UserDefined"
needs_color2 = False
regs = None
if(isinstance(part,Promoter)):
dpl_type = "Promoter"
if(hasattr(part,"regulators")):
regs = part.regulators
elif(isinstance(part,RBS)):
dpl_type = "RBS"
elif(isinstance(part,CDS)):
dpl_type = "CDS"
elif(isinstance(part,Protein)):
dpl_type = "CDS"
elif(isinstance(part,Operator)):
dpl_type="Operator"
elif(isinstance(part,Origin)):
dpl_type = "Origin"
elif(isinstance(part,UserDefined)):
if(part.dpl_type is not None):
dpl_type = part.dpl_type
elif(isinstance(part,Terminator)):
dpl_type = "Terminator"
elif(isinstance(part,IntegraseSite)):
if(part.site_type in ["attP","attB"]):
dpl_type = "RecombinaseSite"
elif(part.site_type in ["attL","attR"]):
dpl_type = "RecombinaseSite2"
needs_color2 = True
for key_part in self.part_dpl_dict:
stored_part = self.part_dpl_dict[key_part]
if(isinstance(key_part,IntegraseSite) and \
key_part.integrase==part.integrase and \
key_part.dinucleotide==part.dinucleotide):
types_list = [key_part.site_type,part.site_type]
if(types_list in [["attP","attL"],["attL","attR"],["attR","attL"], ["attB","attR"]]):
if(set_color2 is None):
set_color2 = stored_part.color
if(types_list in [["attB","attL"],["attP","attR"],["attL","attB"],\
["attR","attP"]]):
if(set_color is None):
set_color = stored_part.color
elif(types_list in [["attL","attR"],["attR","attL"],\
["attL","attP"],["attR","attB"]]):
if(set_color is None):
set_color = stored_part.color2
if(set_color is not None and set_color2 is not None):
#we found all the needed colors so give up
break
if(set_color is None):
color = self.get_color()
else:
color = set_color
color2 = None
if(set_color2 is not None):
color2 = set_color2
elif(set_color2 is None and needs_color2):
color2 = self.get_color()
outpart = self.SimplePart(name=part.name,\
dpl_type=dpl_type,
color=color,
color2=color2)
if(regs is not None):
regparts = []
for reg in regs:
regpart = self.SimplePart(name=reg,dpl_type="Operator",color=color,show_label=False)
regparts += [regpart]
retpart = self.MultiPart(name=part.name,parts_list =[outpart]+regparts)
else:
retpart = outpart
if(save_in_dict):
self.part_dpl_dict[removed_part] = retpart
self.species_dpl_dict[part.dna_species] = retpart
return retpart.get_directed(part.direction)
def render_constructs(constructs,color_dictionary=None):
"""wrapper around CRNPlotter class to make a bunch of constructs which are color coordinated"""
plotter = CRNPlotter(colordict=color_dictionary)
axes = []
for construct in constructs:
axes += [plotter.renderConstruct(construct)]
return axes
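# Illustrative usage sketch (the construct object and the color key below are hypothetical):
#     axes = render_constructs([my_construct], color_dictionary={"ptet": "blue"})
# returns a list with one rendered design per construct, all drawn through a shared
# CRNPlotter so part colors stay consistent across constructs.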
def render_mixture(mixture,crn,color_dictionary=None,output = None,compiled_components=None):
plotter = CRNPlotter(colordict=color_dictionary)
return plotter.renderMixture(mixture,crn,output=output,compiled_components=compiled_components)
def render_network_bokeh(CRN,layout="force", layoutfunc = None, plot_reactions = True, plot_species = True, plot_edges = True, plot_arrows = True,
iterations=2000,rseed=30,posscale=1,export=False,species_glyph_size = 12, reaction_glyph_size = 8, export_name = None, **keywords):
DG, DGspec, DGrxn = generate_networkx_graph(CRN,**keywords) #this creates the networkx objects
    plot = Plot(plot_width=500, plot_height=500, x_range=Range1d(-500, 500), y_range=Range1d(-500, 500)) #this creates an empty Bokeh figure that the network is drawn into
show_im = False
images = None
if("imagedict" in keywords and keywords["imagedict"] is not None):
show_im = True
if(export):
plot.output_backend = "svg"
graphPlot(DG,DGspec,DGrxn,plot,layout=layout,posscale=posscale,iterations=iterations,rseed=rseed,show_species_images=show_im,
layoutfunc=layoutfunc, plot_reactions = plot_reactions, plot_species = plot_species, plot_edges = plot_edges, plot_arrows = plot_arrows,
species_glyph_size = species_glyph_size, reaction_glyph_size = reaction_glyph_size)
if(export):
if export_name is None:
raise ValueError("To export you must supply export_name keyword.")
export_svgs(plot,filename=export_name+".svg")
return plot
|
from setuptools import setup, find_packages
'''
py_modules: only picks up .py files; other file types are ignored
README.rst and MANIFEST.in are added to the package automatically, but LICENSE.txt is not
packages=find_packages lets setup automatically discover the sub-packages to include
platforms=["all"] declares support for all platforms
'''
setup(
name='fileutil',
version='1.0.4',
description=r'''
    file union, file splitting by row/size/column, conversion between txt and excel files, and so on
''',
long_description=open('README.rst', encoding='utf8').read(),
platforms=["all"],
author='Mr linle',
author_email='[email protected]',
python_requires='>=3',
license='MIT',
# py_modules=['common', 'filesplit', 'fileunion', 'filetransform', 'parser'],
packages=find_packages(),
install_requires=[
'cchardet',
'pathlib',
'pandas',
'xlrd',
'numpy',
'xlsxwriter'
],
url='https://github.com/lurkera/fileutil'
)
|
from os.path import expanduser
from pkg_resources import resource_string
SETTINGS_FILE = expanduser('~/.digiglass')
APP_NAME = 'digiglass'
APP_AUTHOR = 'mplewis'
CACHE_EXPIRY = 900 # 15 minutes
# The max number of categories to display when asking the user to choose
MAX_CATEGORIES = 20
# Used for getting choices from the user
LETTERS = 'abcdefghijklmnopqrstuvwxyz'[:MAX_CATEGORIES]
def DEFAULT_SETTINGS():
return resource_string(__name__, 'resource/default_settings.yml')
|
from django.contrib import admin
from wkz import models
admin.site.register(models.Sport)
admin.site.register(models.Activity)
admin.site.register(models.Settings)
admin.site.register(models.Traces)
admin.site.register(models.Lap)
admin.site.register(models.BestSection)
|
import os
from datetime import datetime
_OUTPUT_DIR = 'data'
def ensure_path(path):
if not os.path.exists(path):
os.makedirs(path)
def ensure_output_path(mod):
p = _OUTPUT_DIR + '/' + mod
ensure_path(p)
def get_output_dir(mod):
return _OUTPUT_DIR + '/' + mod
def get_output_path(mod, file):
return get_output_dir(mod) + '/' + file
def date_string():
return datetime.now().strftime("%Y%m%d-%H%M%S")
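# Illustrative usage sketch (the 'scraper' module name is hypothetical):
#     ensure_output_path('scraper')
#     out_file = get_output_path('scraper', date_string() + '.csv')
# creates data/scraper/ if it does not exist and builds a timestamped file path inside it.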
|
from interactions.utils.sim_focus import SimFocus, get_next_focus_id
from socials.geometry import SocialGeometry
import interactions.utils.sim_focus
import sims4.log
import sims4.math
import services
logger = sims4.log.Logger('Social Group')
class SocialFocusManager:
class SimFocusEntry:
def __init__(self, sim, score, layer):
self.sim_id = sim.id
self.score = score
self.layer = layer
self._focus_bone = sim.get_focus_bone()
self._focus_ids = {}
def add_focus_id(self, target_id, focus_id):
self._focus_ids[target_id] = focus_id
def get_focus_id(self, target_id):
return self._focus_ids.get(target_id)
def remove_focus(self, owner_sim, target_id):
focus_id = self.get_focus_id(target_id)
if focus_id is not None:
                interactions.utils.sim_focus.FocusDelete(owner_sim, self.sim_id, focus_id)
del self._focus_ids[target_id]
def __init__(self, social_group):
self._social_group = social_group
self._sim_focus_info = {SimFocus.LAYER_SUPER_INTERACTION: {}, SimFocus.LAYER_INTERACTION: {}}
def shutdown(self, sim):
if self._sim_focus_info is None:
return
for v in list(self._sim_focus_info.values()):
for sim_entry in list(v.values()):
for focus_id in list(sim_entry._focus_ids.values()):
if sim is None:
sim = services.object_manager().get(sim_entry.sim_id)
if sim is not None:
interactions.utils.sim_focus.FocusDelete(sim, sim_entry.sim_id, focus_id)
else:
interactions.utils.sim_focus.FocusDebug('Focus: Leaking focus id ' + str(focus_id))
self._sim_focus_info = None
self._social_group = None
def get_key(self, layer, owner_sim, sim):
if layer is SimFocus.LAYER_SUPER_INTERACTION:
return (0, sim.id)
else:
return (owner_sim.id, sim.id)
def add_sim(self, owner_sim, sim, score, layer):
if self._sim_focus_info is None:
return
key = self.get_key(layer, owner_sim, sim)
my_entry = self.SimFocusEntry(sim, score, layer)
if key in self._sim_focus_info[layer]:
my_entry = self._sim_focus_info[layer][key]
my_entry.score = score
else:
self._sim_focus_info[layer][key] = my_entry
for (k, sim_entry) in self._sim_focus_info[layer].items():
if sim_entry.sim_id != sim.id:
if k[0] == key[0]:
focus_id = my_entry.get_focus_id(sim_entry.sim_id)
if focus_id is not None:
interactions.utils.sim_focus.FocusModifyScore(owner_sim, my_entry.sim_id, focus_id, sim_entry.score)
else:
focus_id = get_next_focus_id()
my_entry.add_focus_id(sim_entry.sim_id, focus_id)
interactions.utils.sim_focus.FocusAdd(owner_sim, focus_id, sim_entry.layer, sim_entry.score, my_entry.sim_id, sim_entry.sim_id, sim_entry._focus_bone, sims4.math.Vector3(0, 0, 0))
focus_id = sim_entry.get_focus_id(sim.id)
if focus_id is not None:
interactions.utils.sim_focus.FocusModifyScore(owner_sim, sim_entry.sim_id, focus_id, my_entry.score)
else:
focus_id = get_next_focus_id()
sim_entry.add_focus_id(my_entry.sim_id, focus_id)
interactions.utils.sim_focus.FocusAdd(owner_sim, focus_id, my_entry.layer, my_entry.score, sim_entry.sim_id, my_entry.sim_id, my_entry._focus_bone, sims4.math.Vector3(0, 0, 0))
def clear_sim(self, owner_sim, sim, layer):
if self._sim_focus_info is None:
return
key = self.get_key(layer, owner_sim, sim)
if key in self._sim_focus_info[layer]:
self.add_sim(owner_sim, sim, -1, layer)
def remove_sim(self, owner_sim, sim):
if self._sim_focus_info is None:
return
for layer in self._sim_focus_info.keys():
for k in list(self._sim_focus_info[layer].keys()):
sim_entry = self._sim_focus_info[layer][k]
if sim_entry.sim_id != sim.id:
sim_entry.remove_focus(owner_sim, sim)
else:
for focus_id in sim_entry._focus_ids.values():
interactions.utils.sim_focus.FocusDelete(owner_sim, sim.id, focus_id)
del self._sim_focus_info[layer][k]
def print_info(self):
if self._sim_focus_info is None:
return
interactions.utils.sim_focus.FocusDebug('Focus Man: ' + str(self) + ' ----------------------------------------')
for (layer, v) in list(self._sim_focus_info.items()):
interactions.utils.sim_focus.FocusDebug('Layer:' + str(layer))
for (k, sim_entry) in v.items():
interactions.utils.sim_focus.FocusDebug(' Key:' + str(k))
for (target_id, focus_id) in sim_entry._focus_ids.items():
target_key = (k[0], target_id)
target_entry = self._sim_focus_info[layer].get(target_key)
score = 0
if target_entry:
score = target_entry.score
else:
score = 'Unknown'
interactions.utils.sim_focus.FocusDebug(' Sim:' + str(sim_entry.sim_id) + ' Target:' + str(target_id) + ' focus_id:' + str(focus_id) + ' Score:' + str(score))
interactions.utils.sim_focus.FocusDebug('End Focus Man: ----------------------------------------')
def force_update(self, owner_sim, participant_list):
if participant_list:
for (participant, _) in participant_list:
interactions.utils.sim_focus.FocusForceUpdate(owner_sim, participant.id)
def active_focus_begin(self, owner_sim, participant_list, immediate=False):
if participant_list:
for (participant, score) in participant_list:
self.add_sim(owner_sim, participant, score, SimFocus.LAYER_INTERACTION)
if immediate:
self.force_update(owner_sim, participant_list)
def active_focus_end(self, owner_sim, participant_list):
if participant_list:
for (participant, _) in participant_list:
self.clear_sim(owner_sim, participant, SimFocus.LAYER_INTERACTION)
def get_focus_entry_for_sim(self, owner_sim, sim, layer):
key = self.get_key(layer, owner_sim, sim)
if self._sim_focus_info is not None:
return self._sim_focus_info[layer].get(key)
def add_focus_entry_for_sim(self, owner_sim, sim, layer, entry):
if self._sim_focus_info is None:
return
key = self.get_key(layer, owner_sim, sim)
self._sim_focus_info[layer][key] = entry
for (target_id, focus_id) in entry._focus_ids.items():
interactions.utils.sim_focus.FocusAdd(owner_sim, focus_id, entry.layer, entry.score, sim.id, target_id, entry._focus_bone, sims4.math.Vector3.ZERO())
def remove_focus_entry_for_sim(self, owner_sim, sim, layer):
if self._sim_focus_info is None:
return
key = self.get_key(layer, owner_sim, sim)
my_entry = self._sim_focus_info[layer].get(key)
if my_entry is not None:
for focus_id in my_entry._focus_ids.values():
interactions.utils.sim_focus.FocusDelete(owner_sim, sim.id, focus_id)
del self._sim_focus_info[layer][key]
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from __future__ import division, print_function
from ase.atoms import Atoms as ASEAtoms, Atom as ASEAtom
from ase.symbols import Symbols as ASESymbols
import ast
from copy import copy
from collections import OrderedDict
import numpy as np
import warnings
import seekpath
from pyiron_atomistics.atomistics.structure.atom import Atom, ase_to_pyiron as ase_to_pyiron_atom
from pyiron_atomistics.atomistics.structure.neighbors import Neighbors, Tree
from pyiron_atomistics.atomistics.structure._visualize import Visualize
from pyiron_atomistics.atomistics.structure.analyse import Analyse
from pyiron_atomistics.atomistics.structure.symmetry import Symmetry
from pyiron_atomistics.atomistics.structure.sparse_list import SparseArray, SparseList
from pyiron_atomistics.atomistics.structure.periodic_table import (
PeriodicTable,
ChemicalElement
)
from pyiron_base import Settings, deprecate, deprecate_soon
from pyiron_atomistics.atomistics.structure.pyironase import publication
from pymatgen.io.ase import AseAtomsAdaptor
from scipy.spatial import cKDTree, Voronoi
__author__ = "Joerg Neugebauer, Sudarsan Surendralal"
__copyright__ = (
"Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "Sudarsan Surendralal"
__email__ = "[email protected]"
__status__ = "production"
__date__ = "Sep 1, 2017"
s = Settings()
class Atoms(ASEAtoms):
"""
The Atoms class represents all the information required to describe a structure at the atomic scale. This class is
derived from the `ASE atoms class`_.
Args:
elements (list/numpy.ndarray): List of strings containing the elements or a list of
atomistics.structure.periodic_table.ChemicalElement instances
numbers (list/numpy.ndarray): List of atomic numbers of elements
symbols (list/numpy.ndarray): List of chemical symbols
positions (list/numpy.ndarray): List of positions
scaled_positions (list/numpy.ndarray): List of scaled positions (relative coordinates)
pbc (list/numpy.ndarray/boolean): Tells if periodic boundary conditions should be applied on the three axes
cell (list/numpy.ndarray instance): A 3x3 array representing the lattice vectors of the structure
Note: Only one of elements/symbols or numbers should be assigned during initialization
Attributes:
indices (numpy.ndarray): A list of size N which gives the species index of the structure which has N atoms
.. _ASE atoms class: https://wiki.fysik.dtu.dk/ase/ase/atoms.html
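    Example (illustrative only; the element, positions and lattice constant below are made up):
        >>> import numpy as np
        >>> basis = Atoms(elements=["Fe", "Fe"],
        ...               scaled_positions=[[0, 0, 0], [0.5, 0.5, 0.5]],
        ...               cell=2.85 * np.eye(3), pbc=True)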
"""
def __init__(
self,
symbols=None,
positions=None,
numbers=None,
tags=None,
momenta=None,
masses=None,
magmoms=None,
charges=None,
scaled_positions=None,
cell=None,
pbc=None,
celldisp=None,
constraint=None,
calculator=None,
info=None,
indices=None,
elements=None,
dimension=None,
species=None,
**qwargs
):
if symbols is not None:
if elements is None:
elements = symbols
else:
raise ValueError("Only elements OR symbols should be given.")
if (
tags is not None
or momenta is not None
or masses is not None
or charges is not None
or celldisp is not None
or constraint is not None
or calculator is not None
or info is not None
):
s.logger.debug("Not supported parameter used!")
self._store_elements = dict()
self._species_to_index_dict = None
self._is_scaled = False
self._species = list()
self.indices = np.array([])
self.constraints = None
self._pse = PeriodicTable()
self._tag_list = SparseArray()
el_index_lst = list()
element_list = None
if numbers is not None: # for ASE compatibility
if not (elements is None):
raise AssertionError()
elements = self.numbers_to_elements(numbers)
if elements is not None:
el_object_list = None
if isinstance(elements, str):
element_list = self.convert_formula(elements)
elif isinstance(elements, (list, tuple, np.ndarray)):
if not all([isinstance(el, elements[0].__class__) for el in elements]):
object_list = list()
for el in elements:
if isinstance(el, str):
object_list.append(self.convert_element(el))
if isinstance(el, ChemicalElement):
object_list.append(el)
if isinstance(el, Atom):
object_list.append(el.element)
if isinstance(el, (int, np.integer)):
# pse = PeriodicTable()
object_list.append(self._pse.element(el))
el_object_list = object_list
if len(elements) == 0:
element_list = elements
else:
if isinstance(elements[0], (list, tuple, np.ndarray)):
elements = np.array(elements).flatten()
if isinstance(elements[0], str):
element_list = elements
elif isinstance(elements[0], ChemicalElement):
el_object_list = elements
elif isinstance(elements[0], Atom):
el_object_list = [el.element for el in elements]
positions = [el.position for el in elements]
elif elements.dtype in [int, np.integer]:
el_object_list = self.numbers_to_elements(elements)
else:
raise ValueError(
"Unknown static type for element in list: "
+ str(type(elements[0]))
)
if el_object_list is None:
el_object_list = [self.convert_element(el) for el in element_list]
# Create a list from a set but always preserve order
self.set_species(list(dict.fromkeys(el_object_list)))
el_index_lst = [self._species_to_index_dict[el] for el in el_object_list]
elif indices is not None:
el_index_lst = indices
if species is None:
raise ValueError("species must be given if indices is given, but is None.")
self.set_species(species)
self.indices = np.array(el_index_lst, dtype=int)
el_lst = [el.Abbreviation if el.Parent is None else el.Parent for el in self.species]
symbols = np.array([el_lst[el] for el in self.indices])
self._tag_list._length = len(symbols)
super(Atoms, self).__init__(symbols=symbols, positions=positions, numbers=None,
tags=tags, momenta=momenta, masses=masses,
magmoms=magmoms, charges=charges,
scaled_positions=scaled_positions, cell=cell,
pbc=pbc, celldisp=celldisp, constraint=constraint,
calculator=calculator, info=info)
self.bonds = None
self.units = {"length": "A", "mass": "u"}
self.set_initial_magnetic_moments(magmoms)
self._high_symmetry_points = None
self._high_symmetry_path = None
self.dimension = dimension
if len(self.positions) > 0:
self.dimension = len(self.positions[0])
else:
self.dimension = 0
self._visualize = Visualize(self)
self._analyse = Analyse(self)
@property
def spins(self):
"""
Magnetic spins for each atom in the structure
Returns:
numpy.ndarray/list: The magnetic moments for reach atom as a single value or a vector (non-collinear spins)
"""
if self.has("initial_magmoms"):
return self.arrays["initial_magmoms"]
else:
return
@spins.setter
def spins(self, val):
if val is not None:
val = np.asarray(val)
if self.has("initial_magmoms"):
try:
self.arrays["initial_magmoms"][:] = val
except ValueError as err:
if len(self.arrays["initial_magmoms"]) == len(val):
self.set_array('initial_magmoms', None)
self.arrays["initial_magmoms"] = val
else:
raise err
else:
self.new_array("initial_magmoms", val)
else:
self.set_array('initial_magmoms', None)
@property
def visualize(self):
return self._visualize
@property
def analyse(self):
return self._analyse
@property
def species(self):
"""
list: A list of atomistics.structure.periodic_table.ChemicalElement instances
"""
return self._species
# @species.setter
def set_species(self, value):
"""
Setting the species list
Args:
value (list): A list atomistics.structure.periodic_table.ChemicalElement instances
"""
if value is None:
return
value = list(value)
self._species_to_index_dict = {el: i for i, el in enumerate(value)}
self._species = value[:]
self._store_elements = {el.Abbreviation: el for el in value}
@property
def symbols(self):
"""
Get chemical symbols as a :class:`ase.symbols.Symbols` object.
The object works like ``atoms.numbers`` except its values
are strings. It supports in-place editing.
"""
sym_obj = Symbols(self.numbers)
sym_obj.structure = self
return sym_obj
@symbols.setter
def symbols(self, obj):
new_symbols = Symbols.fromsymbols(obj)
self.numbers[:] = new_symbols.numbers
@property
def elements(self):
"""
numpy.ndarray: A size N list of atomistics.structure.periodic_table.ChemicalElement instances according
to the ordering of the atoms in the instance
"""
return np.array([self.species[el] for el in self.indices])
def get_high_symmetry_points(self):
"""
dictionary of high-symmetry points defined for this specific structure.
Returns:
dict: high_symmetry_points
"""
return self._high_symmetry_points
def _set_high_symmetry_points(self, new_high_symmetry_points):
"""
Sets new high symmetry points dictionary.
Args:
new_high_symmetry_points (dict): new high symmetry points
"""
if not isinstance(new_high_symmetry_points, dict):
raise ValueError("has to be dict!")
self._high_symmetry_points = new_high_symmetry_points
def add_high_symmetry_points(self, new_points):
"""
Adds new points to the dict of existing high symmetry points.
Args:
new_points (dict): Points to add
"""
if self.get_high_symmetry_points() is None:
raise AssertionError("Construct high symmetry points first. Use self.create_line_mode_structure().")
else:
self._high_symmetry_points.update(new_points)
def get_high_symmetry_path(self):
"""
Path used for band structure calculations
Returns:
            dict: dict of paths with start and end points.
"""
return self._high_symmetry_path
def _set_high_symmetry_path(self, new_path):
"""
Sets new list for the high symmetry path used for band structure calculations.
Args:
new_path (dict): dictionary of lists of tuples with start and end point.
                e.g. {"my_path": [('Gamma', 'X'), ('X', 'Y')]}
"""
self._high_symmetry_path = new_path
def add_high_symmetry_path(self, path):
"""
        Adds a new path to the dictionary of paths for band structure calculations.
        Args:
            path (dict): dictionary of lists of tuples with start and end point,
                e.g. {"my_path": [('Gamma', 'X'), ('X', 'Y')]}
"""
if self.get_high_symmetry_path() is None:
raise AssertionError("Construct high symmetry path first. Use self.create_line_mode_structure().")
for values_all in path.values():
for values in values_all:
if not len(values) == 2:
raise ValueError(
"'{}' is not a propper trace! It has to contain exactly 2 values! (start and end point)".format(
values))
for v in values:
if v not in self.get_high_symmetry_points().keys():
raise ValueError("'{}' is not a valid high symmetry point".format(v))
self._high_symmetry_path.update(path)
def add_tag(self, *args, **qwargs):
"""
Add tags to the atoms object.
Examples:
For selective dynamics::
>>> self.add_tag(selective_dynamics=[False, False, False])
"""
self._tag_list.add_tag(*args, **qwargs)
# @staticmethod
def numbers_to_elements(self, numbers):
"""
        Convert atomic numbers into element objects (needed for compatibility with ASE)
Args:
numbers (list): List of Element Numbers (as Integers; default in ASE)
Returns:
list: A list of elements as needed for pyiron
"""
# pse = PeriodicTable() # TODO; extend to internal PSE which can contain additional elements and tags
atom_number_to_element = {}
for i_el in set(numbers):
i_el = int(i_el)
atom_number_to_element[i_el] = self._pse.element(i_el)
return [atom_number_to_element[i_el] for i_el in numbers]
def copy(self):
"""
Returns a copy of the instance
Returns:
pyiron.atomistics.structure.atoms.Atoms: A copy of the instance
"""
return self.__copy__()
def to_hdf(self, hdf, group_name="structure"):
"""
Save the object in a HDF5 file
Args:
hdf (pyiron_base.generic.hdfio.FileHDFio): HDF path to which the object is to be saved
group_name (str):
Group name with which the object should be stored. This same name should be used to retrieve the object
"""
# import time
with hdf.open(group_name) as hdf_structure:
# time_start = time.time()
hdf_structure["TYPE"] = str(type(self))
for el in self.species:
if isinstance(el.tags, dict):
with hdf_structure.open("new_species") as hdf_species:
el.to_hdf(hdf_species)
hdf_structure["species"] = [el.Abbreviation for el in self.species]
hdf_structure["indices"] = self.indices
with hdf_structure.open("tags") as hdf_tags:
for tag in self._tag_list.keys():
tag_value = self._tag_list[tag]
if isinstance(tag_value, SparseList):
tag_value.to_hdf(hdf_tags, tag)
hdf_structure["units"] = self.units
hdf_structure["dimension"] = self.dimension
if self.cell is not None:
with hdf_structure.open("cell") as hdf_cell:
# Convert ASE cell object to numpy array before storing
hdf_cell["cell"] = np.array(self.cell)
hdf_cell["pbc"] = self.pbc
# hdf_structure["coordinates"] = self.positions # "Atomic coordinates"
hdf_structure["positions"] = self.positions # "Atomic coordinates"
hdf_structure["spins"] = self.spins
# potentials with explicit bonds (TIP3P, harmonic, etc.)
if self.bonds is not None:
hdf_structure["explicit_bonds"] = self.bonds
# print ('time in atoms.to_hdf: ', time.time() - time_start)
if self._high_symmetry_points is not None:
hdf_structure["high_symmetry_points"] = self._high_symmetry_points
if self._high_symmetry_path is not None:
hdf_structure["high_symmetry_path"] = self._high_symmetry_path
hdf_structure["info"] = self.info
def from_hdf(self, hdf, group_name="structure"):
"""
Retrieve the object from a HDF5 file
Args:
hdf (pyiron_base.generic.hdfio.FileHDFio): HDF path to which the object is to be saved
            group_name (str): Group name from which the Atoms object is retrieved.
Returns:
pyiron_atomistics.structure.atoms.Atoms: The retrieved atoms class
"""
if "indices" in hdf[group_name].list_nodes():
with hdf.open(group_name) as hdf_atoms:
if "new_species" in hdf_atoms.list_groups():
with hdf_atoms.open("new_species") as hdf_species:
self._pse.from_hdf(hdf_species)
el_object_list = [
self.convert_element(el, self._pse) for el in hdf_atoms["species"]
]
self.indices = hdf_atoms["indices"]
self._tag_list._length = len(self.indices)
self.set_species(el_object_list)
self.bonds = None
tr_dict = {1: True, 0: False}
self.dimension = hdf_atoms["dimension"]
self.units = hdf_atoms["units"]
if "cell" in hdf_atoms.list_groups():
with hdf_atoms.open("cell") as hdf_cell:
self.cell = hdf_cell["cell"]
self.pbc = hdf_cell["pbc"]
# Backward compatibility
position_tag = "positions"
if position_tag not in hdf_atoms.list_nodes():
position_tag = "coordinates"
if "is_absolute" in hdf_atoms.list_nodes():
if not tr_dict[hdf_atoms["is_absolute"]]:
self.set_scaled_positions(hdf_atoms[position_tag])
else:
self.arrays['positions'] = hdf_atoms[position_tag]
else:
self.arrays['positions'] = hdf_atoms[position_tag]
self.arrays['numbers'] = self.get_atomic_numbers()
if "explicit_bonds" in hdf_atoms.list_nodes():
# print "bonds: "
self.bonds = hdf_atoms["explicit_bonds"]
if "spins" in hdf_atoms.list_nodes():
self.spins = hdf_atoms["spins"]
if "tags" in hdf_atoms.list_groups():
with hdf_atoms.open("tags") as hdf_tags:
tags = hdf_tags.list_nodes()
for tag in tags:
# tr_dict = {'0': False, '1': True}
if isinstance(hdf_tags[tag], (list, np.ndarray)):
my_list = hdf_tags[tag]
self._tag_list[tag] = SparseList(
my_list, length=len(self)
)
else:
my_dict = hdf_tags.get_pandas(tag).to_dict()
my_dict = {
i: val
for i, val in zip(
my_dict["index"], my_dict["values"]
)
}
self._tag_list[tag] = SparseList(
my_dict, length=len(self)
)
if "bonds" in hdf_atoms.list_nodes():
self.bonds = hdf_atoms["explicit_bonds"]
self._high_symmetry_points = None
if "high_symmetry_points" in hdf_atoms.list_nodes():
self._high_symmetry_points = hdf_atoms["high_symmetry_points"]
self._high_symmetry_path = None
if "high_symmetry_path" in hdf_atoms.list_nodes():
self._high_symmetry_path = hdf_atoms["high_symmetry_path"]
if "info" in hdf_atoms.list_nodes():
self.info = hdf_atoms["info"]
return self
else:
return self._from_hdf_old(hdf, group_name)
def _from_hdf_old(self, hdf, group_name="structure"):
"""
        This function exists merely for the purpose of backward compatibility
"""
with hdf.open(group_name) as hdf_atoms:
self._pse = PeriodicTable()
if "species" in hdf_atoms.list_groups():
with hdf_atoms.open("species") as hdf_species:
self._pse.from_hdf(hdf_species)
chemical_symbols = np.array(hdf_atoms["elements"], dtype=str)
el_object_list = [
self.convert_element(el, self._pse) for el in chemical_symbols
]
self.set_species(list(set(el_object_list)))
self.indices = [self._species_to_index_dict[el] for el in el_object_list]
self._tag_list._length = len(self)
self.bonds = None
if "explicit_bonds" in hdf_atoms.list_nodes():
# print "bonds: "
self.bonds = hdf_atoms["explicit_bonds"]
if "tags" in hdf_atoms.list_groups():
with hdf_atoms.open("tags") as hdf_tags:
tags = hdf_tags.list_nodes()
for tag in tags:
# tr_dict = {'0': False, '1': True}
if isinstance(hdf_tags[tag], (list, np.ndarray)):
my_list = hdf_tags[tag]
self._tag_list[tag] = SparseList(my_list, length=len(self))
else:
my_dict = hdf_tags.get_pandas(tag).to_dict()
my_dict = {
i: val
for i, val in zip(my_dict["index"], my_dict["values"])
}
self._tag_list[tag] = SparseList(my_dict, length=len(self))
self.cell = None
if "cell" in hdf_atoms.list_groups():
with hdf_atoms.open("cell") as hdf_cell:
self.cell = hdf_cell["cell"]
self.pbc = hdf_cell["pbc"]
tr_dict = {1: True, 0: False}
self.dimension = hdf_atoms["dimension"]
if "is_absolute" in hdf_atoms and not tr_dict[hdf_atoms["is_absolute"]]:
self.positions = hdf_atoms["coordinates"]
else:
self.set_scaled_positions(hdf_atoms["coordinates"])
self.units = hdf_atoms["units"]
if "bonds" in hdf_atoms.list_nodes():
self.bonds = hdf_atoms["explicit_bonds"]
self._high_symmetry_points = None
if "high_symmetry_points" in hdf_atoms.list_nodes():
self._high_symmetry_points = hdf_atoms["high_symmetry_points"]
return self
def select_index(self, el):
"""
Returns the indices of a given element in the structure
Args:
el (str/atomistics.structures.periodic_table.ChemicalElement/list): Element for which the indices should
be returned
Returns:
numpy.ndarray: An array of indices of the atoms of the given element
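        Example (illustrative; ``structure`` is assumed to contain Fe atoms):
            >>> structure.select_index("Fe")  # indices of all Fe atoms, e.g. array([0, 1])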
"""
if isinstance(el, str):
return np.where(self.get_chemical_symbols() == el)[0]
elif isinstance(el, ChemicalElement):
return np.where([e == el for e in self.get_chemical_elements()])[0]
if isinstance(el, (list, np.ndarray)):
if isinstance(el[0], str):
return np.where(np.isin(self.get_chemical_symbols(), el))[0]
elif isinstance(el[0], ChemicalElement):
return np.where([e in el for e in self.get_chemical_elements()])[0]
def select_parent_index(self, el):
"""
Returns the indices of a given element in the structure ignoring user defined elements
Args:
el (str/atomistics.structures.periodic_table.ChemicalElement): Element for which the indices should
be returned
Returns:
numpy.ndarray: An array of indices of the atoms of the given element
"""
parent_basis = self.get_parent_basis()
return parent_basis.select_index(el)
def get_tags(self):
"""
Returns the keys of the stored tags of the structure
Returns:
dict_keys: Keys of the stored tags
"""
return self._tag_list.keys()
def convert_element(self, el, pse=None):
"""
Convert a string or an atom instance into a ChemicalElement instance
Args:
el (str/atomistics.structure.atom.Atom): String or atom instance from which the element should
be generated
pse (atomistics.structure.periodictable.PeriodicTable): PeriodicTable instance from which the element
is generated (optional)
Returns:
atomistics.structure.periodictable.ChemicalElement: The required chemical element
"""
if el in list(self._store_elements.keys()):
return self._store_elements[el]
if isinstance(el, str): # as symbol
element = Atom(el, pse=pse).element
elif isinstance(el, Atom):
element = el.element
el = el.element.Abbreviation
elif isinstance(el, ChemicalElement):
element = el
el = el.Abbreviation
else:
raise ValueError("Unknown static type to specify a element")
self._store_elements[el] = element
if hasattr(self, "species"):
if element not in self.species:
self._species.append(element)
self.set_species(self._species)
return element
def get_chemical_formula(self):
"""
Returns the chemical formula of structure
Returns:
str: The chemical formula as a string
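        Example (illustrative; assuming a cell that contains one Mg and two O atoms):
            >>> structure.get_chemical_formula()
            'MgO2'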
"""
species = self.get_number_species_atoms()
formula = ""
for string_sym, num in species.items():
if num == 1:
formula += str(string_sym)
else:
formula += str(string_sym) + str(num)
return formula
def get_chemical_indices(self):
"""
Returns the list of chemical indices as ordered in self.species
Returns:
numpy.ndarray: A list of chemical indices
"""
return self.indices.copy()
def get_atomic_numbers(self):
"""
Returns the atomic numbers of all the atoms in the structure
Returns:
numpy.ndarray: A list of atomic numbers
"""
el_lst = [el.AtomicNumber for el in self.species]
return np.array([el_lst[el] for el in self.indices])
def get_chemical_symbols(self):
"""
Returns the chemical symbols for all the atoms in the structure
Returns:
numpy.ndarray: A list of chemical symbols
"""
el_lst = [el.Abbreviation for el in self.species]
return np.array([el_lst[el] for el in self.indices])
def get_parent_symbols(self):
"""
Returns the chemical symbols for all the atoms in the structure even for user defined elements
Returns:
numpy.ndarray: A list of chemical symbols
"""
sp_parent_list = list()
for sp in self.species:
if isinstance(sp.Parent, (float, type(None))):
sp_parent_list.append(sp.Abbreviation)
else:
sp_parent_list.append(sp.Parent)
return np.array([sp_parent_list[i] for i in self.indices])
def get_parent_basis(self):
"""
        Returns the basis with all user defined/special elements replaced by their parent element
Returns:
pyiron.atomistics.structure.atoms.Atoms: Structure without any user defined elements
"""
parent_basis = copy(self)
new_species = np.array(parent_basis.species)
for i, sp in enumerate(new_species):
if not isinstance(sp.Parent, (float, type(None))):
pse = PeriodicTable()
new_species[i] = pse.element(sp.Parent)
sym_list = [el.Abbreviation for el in new_species]
if len(sym_list) != len(np.unique(sym_list)):
uni, ind, inv_ind = np.unique(
sym_list, return_index=True, return_inverse=True
)
new_species = new_species[ind].copy()
parent_basis.set_species(list(new_species))
indices_copy = parent_basis.indices.copy()
for i, ind_ind in enumerate(inv_ind):
indices_copy[parent_basis.indices == i] = ind_ind
parent_basis.indices = indices_copy
return parent_basis
parent_basis.set_species(list(new_species))
return parent_basis
def get_chemical_elements(self):
"""
Returns the list of chemical element instances
Returns:
numpy.ndarray: A list of chemical element instances
"""
return self.elements
def get_number_species_atoms(self):
"""
Returns a dictionary with the species in the structure and the corresponding count in the structure
Returns:
collections.OrderedDict: An ordered dictionary with the species and the corresponding count
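        Example (illustrative; assuming the same Mg/O cell as in get_chemical_formula):
            >>> structure.get_number_species_atoms()
            OrderedDict([('Mg', 1), ('O', 2)])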
"""
count = OrderedDict()
# print "sorted: ", sorted(set(self.elements))
for el in sorted(set(self.get_chemical_symbols())):
count[el] = 0
for el in self.get_chemical_symbols():
count[el] += 1
return count
def get_species_symbols(self):
"""
Returns the symbols of the present species
Returns:
numpy.ndarray: List of the symbols of the species
"""
return np.array(sorted([el.Abbreviation for el in self.species]))
def get_species_objects(self):
"""
Returns:
"""
el_set = self.species
el_sym_lst = {el.Abbreviation: i for i, el in enumerate(el_set)}
el_sorted = self.get_species_symbols()
return [el_set[el_sym_lst[el]] for el in el_sorted]
def get_number_of_species(self):
"""
Returns:
"""
return len(self.species)
def get_number_of_degrees_of_freedom(self):
"""
Returns:
"""
return len(self) * self.dimension
def get_center_of_mass(self):
"""
Returns:
com (float): center of mass in A
"""
masses = self.get_masses()
return np.einsum("i,ij->j", masses, self.positions) / np.sum(masses)
def get_masses(self):
"""
Gets the atomic masses of all atoms in the structure
Returns:
numpy.ndarray: Array of masses
"""
el_lst = [el.AtomicMass for el in self.species]
return np.array([el_lst[el] for el in self.indices])
def get_masses_dof(self):
"""
Returns:
"""
dim = self.dimension
return np.repeat(self.get_masses(), dim)
def get_volume(self, per_atom=False):
"""
Args:
per_atom (bool): True if volume per atom is to be returned
Returns:
volume (float): Volume in A**3
"""
if per_atom:
return np.abs(np.linalg.det(self.cell)) / len(self)
else:
return np.abs(np.linalg.det(self.cell))
def get_density(self):
"""
Returns the density in g/cm^3
Returns:
float: Density of the structure
"""
        # conv_factor = Ang3_to_cm3/scipy.constants.Avogadro
# with Ang3_to_cm3 = 1e24
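        # added sanity check: 1e24 / 6.022140857e23 = 1.66053904..., which matches the hard-coded value below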
conv_factor = 1.660539040427164
return conv_factor * np.sum(self.get_masses()) / self.get_volume()
def get_number_of_atoms(self):
"""
Returns:
"""
# assert(len(self) == np.sum(self.get_number_species_atoms().values()))
return len(self)
@deprecate
def set_absolute(self):
if self._is_scaled:
self._is_scaled = False
@deprecate
def set_relative(self):
if not self._is_scaled:
self._is_scaled = True
def get_wrapped_coordinates(self, positions, epsilon=1.0e-8):
"""
        Return coordinates wrapped into the periodic cell
Args:
positions (list/numpy.ndarray): Positions
epsilon (float): displacement to add to avoid wrapping of atoms at borders
Returns:
numpy.ndarray: Wrapped positions
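        Example (illustrative; assuming a cubic 4 Angstrom cell with pbc=[True, True, True]):
            >>> structure.get_wrapped_coordinates([[5.0, -1.0, 2.0]])
            array([[1., 3., 2.]])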
"""
scaled_positions = np.einsum(
'ji,nj->ni', np.linalg.inv(self.cell), np.asarray(positions).reshape(-1, 3)
)
if any(self.pbc):
scaled_positions[:, self.pbc] -= np.floor(scaled_positions[:, self.pbc]+epsilon)
new_positions = np.einsum('ji,nj->ni', self.cell, scaled_positions)
return new_positions.reshape(np.asarray(positions).shape)
def center_coordinates_in_unit_cell(self, origin=0, eps=1e-4):
"""
Wrap atomic coordinates within the supercell.
Modifies object in place and returns itself.
Args:
origin (float): 0 to confine between 0 and 1, -0.5 to confine between -0.5 and 0.5
eps (float): Tolerance to detect atoms at cell edges
Returns:
:class:`pyiron_atomistics.atomistics.structure.atoms.Atoms`: reference to this structure
"""
if any(self.pbc):
self.set_scaled_positions(
np.mod(self.get_scaled_positions(wrap=False) + eps, 1) - eps + origin
)
return self
def create_line_mode_structure(self,
with_time_reversal=True,
recipe='hpkot',
threshold=1e-07,
symprec=1e-05,
angle_tolerance=-1.0,
):
"""
Uses 'seekpath' to create a new structure with high symmetry points and path for band structure calculations.
Args:
with_time_reversal (bool): if False, and the group has no inversion symmetry,
additional lines are returned as described in the HPKOT paper.
recipe (str): choose the reference publication that defines the special points and paths.
Currently, only 'hpkot' is implemented.
            threshold (float): the threshold to use to verify if we are in an edge case
                (e.g., a tetragonal cell, but a==c). For instance, in the tI lattice, if abs(a-c) < threshold,
                an EdgeCaseWarning is issued. Note that depending on the bravais lattice,
the meaning of the threshold is different (angle, length, …)
symprec (float): the symmetry precision used internally by SPGLIB
angle_tolerance (float): the angle_tolerance used internally by SPGLIB
Returns:
pyiron.atomistics.structure.atoms.Atoms: new structure
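        Example (illustrative):
            >>> primitive = structure.create_line_mode_structure()
            >>> primitive.get_high_symmetry_points()  # doctest: +SKIP
        The returned structure carries the seekpath high-symmetry points and the "full" path.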
"""
input_structure = (self.cell, self.get_scaled_positions(), self.indices)
sp_dict = seekpath.get_path(structure=input_structure,
with_time_reversal=with_time_reversal,
recipe=recipe,
threshold=threshold,
symprec=symprec,
angle_tolerance=angle_tolerance,
)
original_element_list = [el.Abbreviation for el in self.species]
element_list = [original_element_list[l] for l in sp_dict["primitive_types"]]
positions = sp_dict["primitive_positions"]
pbc = self.pbc
cell = sp_dict["primitive_lattice"]
struc_new = Atoms(elements=element_list, scaled_positions=positions, pbc=pbc, cell=cell)
struc_new._set_high_symmetry_points(sp_dict["point_coords"])
struc_new._set_high_symmetry_path({"full": sp_dict["path"]})
return struc_new
def repeat(self, rep):
"""Create new repeated atoms object.
The *rep* argument should be a sequence of three positive
integers like *(2,3,1)* or a single integer (*r*) equivalent
to *(r,r,r)*."""
atoms = self.copy()
atoms *= rep
return atoms
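    # Example (illustrative): ``supercell = structure.repeat(2)`` returns a new 2x2x2
    # supercell while leaving ``structure`` itself unchanged; ``set_repeat`` below
    # repeats the structure in place instead.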
def set_repeat(self, vec):
self *= vec
def repeat_points(self, points, rep, centered=False):
"""
Return points with repetition given according to periodic boundary conditions
Args:
points (np.ndarray/list): xyz vector or list/array of xyz vectors
rep (int/list/np.ndarray): Repetition in each direction.
If int is given, the same value is used for
every direction
centered (bool): Whether the original points should be in the center of
repeated points.
Returns:
(np.ndarray) repeated points
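        Example (illustrative): ``structure.repeat_points([0, 0, 0], 2)`` returns the
            eight periodic images of the origin obtained by adding 0 or 1 times each cell vector.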
"""
n = np.array([rep]).flatten()
if len(n)==1:
n = np.tile(n, 3)
if len(n)!=3:
raise ValueError('rep must be an integer or a list of 3 integers')
vector = np.array(points)
if vector.shape[-1]!=3:
raise ValueError('points must be an xyz vector or a list/array of xyz vectors')
if centered and np.mod(n, 2).sum()!=3:
warnings.warn('When centered, only odd number of repetition should be used')
v = vector.reshape(-1, 3)
n_lst = []
for nn in n:
if centered:
n_lst.append(np.arange(nn)-int(nn/2))
else:
n_lst.append(np.arange(nn))
meshgrid = np.meshgrid(n_lst[0], n_lst[1], n_lst[2])
v_repeated = np.einsum('ni,ij->nj', np.stack(meshgrid, axis=-1).reshape(-1, 3), self.cell)
v_repeated = v_repeated[:, np.newaxis, :]+v[np.newaxis, :, :]
return v_repeated.reshape((-1,)+vector.shape)
def reset_absolute(self, is_absolute):
raise NotImplementedError("This function was removed!")
@deprecate("Use Atoms.analyse.pyscal_cna_adaptive() with ovito_compatibility=True instead")
def analyse_ovito_cna_adaptive(self, mode="total"):
return self._analyse.pyscal_cna_adaptive(mode=mode, ovito_compatibility=True)
analyse_ovito_cna_adaptive.__doc__ = Analyse.pyscal_cna_adaptive.__doc__
@deprecate('Use Atoms.analyse.pyscal_centro_symmetry() instead')
def analyse_ovito_centro_symmetry(self, num_neighbors=12):
return self._analyse.pyscal_centro_symmetry(num_neighbors=num_neighbors)
analyse_ovito_centro_symmetry.__doc__ = Analyse.pyscal_centro_symmetry.__doc__
@deprecate("Use Atoms.analyse.pyscal_voronoi_volume() instead")
def analyse_ovito_voronoi_volume(self):
return self._analyse.pyscal_voronoi_volume()
analyse_ovito_voronoi_volume.__doc__ = Analyse.pyscal_voronoi_volume.__doc__
@deprecate("Use Atoms.analyse.pyscal_steinhardt_parameter() instead")
def analyse_pyscal_steinhardt_parameter(self, neighbor_method="cutoff", cutoff=0, n_clusters=2,
q=(4, 6), averaged=False, clustering=True):
return self._analyse.pyscal_steinhardt_parameter(
neighbor_method=neighbor_method, cutoff=cutoff, n_clusters=n_clusters,
q=q, averaged=averaged, clustering=clustering
)
analyse_pyscal_steinhardt_parameter.__doc__ = Analyse.pyscal_steinhardt_parameter.__doc__
@deprecate("Use Atoms.analyse.pyscal_cna_adaptive() instead")
def analyse_pyscal_cna_adaptive(self, mode="total", ovito_compatibility=False):
return self._analyse.pyscal_cna_adaptive(mode=mode, ovito_compatibility=ovito_compatibility)
analyse_pyscal_cna_adaptive.__doc__ = Analyse.pyscal_cna_adaptive.__doc__
@deprecate("Use Atoms.analyse.pyscal_centro_symmetry() instead")
def analyse_pyscal_centro_symmetry(self, num_neighbors=12):
return self._analyse.pyscal_centro_symmetry(num_neighbors=num_neighbors)
analyse_pyscal_centro_symmetry.__doc__ = Analyse.pyscal_centro_symmetry.__doc__
@deprecate("Use Atoms.analyse.pyscal_diamond_structure() instead")
def analyse_pyscal_diamond_structure(self, mode="total", ovito_compatibility=False):
return self._analyse.pyscal_diamond_structure(mode=mode, ovito_compatibility=ovito_compatibility)
analyse_pyscal_diamond_structure.__doc__ = Analyse.pyscal_diamond_structure.__doc__
@deprecate("Use Atoms.analyse.pyscal_voronoi_volume() instead")
def analyse_pyscal_voronoi_volume(self):
return self._analyse.pyscal_voronoi_volume()
analyse_pyscal_voronoi_volume.__doc__ = Analyse.pyscal_voronoi_volume.__doc__
@deprecate("Use get_symmetry()['equivalent_atoms'] instead")
def analyse_phonopy_equivalent_atoms(self):
from pyiron_atomistics.atomistics.structure.phonopy import analyse_phonopy_equivalent_atoms
return analyse_phonopy_equivalent_atoms(atoms=self)
def plot3d(
self,
mode='NGLview',
show_cell=True,
show_axes=True,
camera="orthographic",
spacefill=True,
particle_size=1.0,
select_atoms=None,
background="white",
color_scheme=None,
colors=None,
scalar_field=None,
scalar_start=None,
scalar_end=None,
scalar_cmap=None,
vector_field=None,
vector_color=None,
magnetic_moments=False,
view_plane=np.array([0, 0, 1]),
distance_from_camera=1.0,
opacity=1.0
):
return self.visualize.plot3d(
mode=mode,
show_cell=show_cell,
show_axes=show_axes,
camera=camera,
spacefill=spacefill,
particle_size=particle_size,
select_atoms=select_atoms,
background=background,
color_scheme=color_scheme,
colors=colors,
scalar_field=scalar_field,
scalar_start=scalar_start,
scalar_end=scalar_end,
scalar_cmap=scalar_cmap,
vector_field=vector_field,
vector_color=vector_color,
magnetic_moments=magnetic_moments,
view_plane=view_plane,
distance_from_camera=distance_from_camera,
opacity=opacity,
)
plot3d.__doc__ = Visualize.plot3d.__doc__
def pos_xyz(self):
"""
Returns:
"""
x = self.positions[:, 0]
y = self.positions[:, 1]
z = self.positions[:, 2]
return x, y, z
def scaled_pos_xyz(self):
"""
Returns:
"""
xyz = self.get_scaled_positions(wrap=False)
return xyz[:, 0], xyz[:, 1], xyz[:, 2]
def get_vertical_length(self, norm_order=2):
"""
Return the length of the cell in each direction projected on the vector vertical to the
plane.
Example:
For a cell `[[1, 1, 0], [0, 1, 0], [0, 0, 1]]`, this function returns
`[1., 0.70710678, 1.]` because the first cell vector is projected on the vector vertical
to the yz-plane (as well as the y component on the xz-plane).
Args:
norm_order (int): Norm order (cf. numpy.linalg.norm)
"""
return np.linalg.det(self.cell)/np.linalg.norm(
np.cross(np.roll(self.cell, -1, axis=0), np.roll(self.cell, 1, axis=0)),
axis=-1,
ord=norm_order,
)
def get_extended_positions(self, width, return_indices=False, norm_order=2, positions=None):
"""
Get all atoms in the boundary around the supercell which have a distance
to the supercell boundary of less than dist
Args:
width (float): width of the buffer layer on every periodic box side within which all
atoms across periodic boundaries are chosen.
return_indices (bool): Whether or not return the original indices of the appended
atoms.
norm_order (float): Order of Lp-norm.
positions (numpy.ndarray): Positions for which the extended positions are returned.
If None, the atom positions of the structure are used.
Returns:
            numpy.ndarray: Positions of all atoms in the extended box, indices of the
                corresponding atoms in the original cell (if return_indices=True)
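        Example (illustrative): ``structure.get_extended_positions(3.0)`` returns the atom
            positions plus every periodic image lying within a 3 Angstrom buffer around the box.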
"""
if width<0:
raise ValueError('Invalid width')
if positions is None:
positions = self.positions
if width==0:
if return_indices:
return positions, np.arange(len(positions))
return positions
width /= self.get_vertical_length(norm_order=norm_order)
rep = 2*np.ceil(width).astype(int)*self.pbc+1
rep = [np.arange(r)-int(r/2) for r in rep]
meshgrid = np.meshgrid(rep[0], rep[1], rep[2])
meshgrid = np.stack(meshgrid, axis=-1).reshape(-1, 3)
v_repeated = np.einsum('ni,ij->nj', meshgrid, self.cell)
v_repeated = v_repeated[:,np.newaxis,:]+positions[np.newaxis,:,:]
v_repeated = v_repeated.reshape(-1, 3)
indices = np.tile(np.arange(len(positions)), len(meshgrid))
dist = v_repeated-np.sum(self.cell*0.5, axis=0)
dist = np.absolute(np.einsum('ni,ij->nj', dist+1e-8, np.linalg.inv(self.cell)))-0.5
check_dist = np.all(dist-width<0, axis=-1)
indices = indices[check_dist]%len(positions)
v_repeated = v_repeated[check_dist]
if return_indices:
return v_repeated, indices
return v_repeated
@deprecate("Use get_neighbors and call numbers_of_neighbors")
def get_numbers_of_neighbors_in_sphere(
self,
cutoff_radius=10,
num_neighbors=None,
id_list=None,
width_buffer=1.2,
):
"""
Function to compute the maximum number of neighbors in a sphere around each atom.
Args:
cutoff_radius (float): Upper bound of the distance to which the search must be done
num_neighbors (int/None): maximum number of neighbors found
id_list (list): list of atoms the neighbors are to be looked for
width_buffer (float): width of the layer to be added to account for pbc.
Returns:
(np.ndarray) : for each atom the number of neighbors found in the sphere of radius
cutoff_radius (<= num_neighbors if specified)
"""
return self.get_neighbors(
cutoff_radius=cutoff_radius,
num_neighbors=num_neighbors,
id_list=id_list,
width_buffer=width_buffer,
).numbers_of_neighbors
@deprecate(allow_ragged="use `mode='ragged'` instead.")
def get_neighbors(
self,
num_neighbors=12,
tolerance=2,
id_list=None,
cutoff_radius=np.inf,
width_buffer=1.2,
allow_ragged=None,
mode='filled',
norm_order=2,
):
"""
Args:
num_neighbors (int): number of neighbors
tolerance (int): tolerance (round decimal points) used for computing neighbor shells
id_list (list): list of atoms the neighbors are to be looked for
cutoff_radius (float): Upper bound of the distance to which the search must be done
width_buffer (float): width of the layer to be added to account for pbc.
allow_ragged (bool): (Deprecated; use mode) Whether to allow ragged list of arrays or
rectangular numpy.ndarray filled with np.inf for values outside cutoff_radius
mode (str): Representation of per-atom quantities (distances etc.). Choose from
'filled', 'ragged' and 'flattened'.
norm_order (int): Norm to use for the neighborhood search and shell recognition. The
definition follows the conventional Lp norm (cf.
                https://en.wikipedia.org/wiki/Lp_space). This is an experimental feature and for anything
other than norm_order=2, there is no guarantee that this works flawlessly.
Returns:
pyiron.atomistics.structure.atoms.Neighbors: Neighbors instances with the neighbor
indices, distances and vectors
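        Example (illustrative):
            >>> neigh = structure.get_neighbors(num_neighbors=8)
            >>> neigh.distances  # doctest: +SKIP
        gives, for every atom, the distances to its eight nearest neighbors.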
"""
neigh = self._get_neighbors(
num_neighbors=num_neighbors,
tolerance=tolerance,
id_list=id_list,
cutoff_radius=cutoff_radius,
width_buffer=width_buffer,
norm_order=norm_order,
)
neigh._set_mode(mode)
if allow_ragged is not None:
neigh.allow_ragged = allow_ragged
return neigh
@deprecate(allow_ragged="use `mode='ragged'` instead.")
@deprecate("Use get_neighbors", version="1.0.0")
def get_neighbors_by_distance(
self,
cutoff_radius=5,
num_neighbors=None,
tolerance=2,
id_list=None,
width_buffer=1.2,
allow_ragged=None,
mode='ragged',
norm_order=2,
):
return self.get_neighbors(
cutoff_radius=cutoff_radius,
num_neighbors=num_neighbors,
tolerance=tolerance,
id_list=id_list,
width_buffer=width_buffer,
allow_ragged=allow_ragged,
mode=mode,
norm_order=norm_order,
)
get_neighbors_by_distance.__doc__ = get_neighbors.__doc__
def _get_neighbors(
self,
num_neighbors=12,
tolerance=2,
id_list=None,
cutoff_radius=np.inf,
width_buffer=1.2,
get_tree=False,
norm_order=2,
):
if num_neighbors is not None and num_neighbors<=0:
raise ValueError('invalid number of neighbors')
if width_buffer<0:
raise ValueError('width_buffer must be a positive float')
if get_tree:
neigh = Tree(ref_structure=self)
else:
neigh = Neighbors(ref_structure=self, tolerance=tolerance)
neigh._norm_order = norm_order
width = neigh._estimate_width(
num_neighbors=num_neighbors,
cutoff_radius=cutoff_radius,
width_buffer=width_buffer,
)
extended_positions, neigh._wrapped_indices = self.get_extended_positions(
width, return_indices=True, norm_order=norm_order
)
neigh._extended_positions = extended_positions
neigh._tree = cKDTree(extended_positions)
if get_tree:
return neigh
positions = self.positions
if id_list is not None:
positions = positions[np.array(id_list)]
neigh._get_neighborhood(
positions=positions,
num_neighbors=num_neighbors,
cutoff_radius=cutoff_radius,
exclude_self=True,
width_buffer=width_buffer,
)
if neigh._check_width(width=width, pbc=self.pbc):
warnings.warn('width_buffer may have been too small - '
'most likely not all neighbors properly assigned')
return neigh
def get_neighborhood(
self,
positions,
num_neighbors=12,
cutoff_radius=np.inf,
width_buffer=1.2,
mode='filled',
norm_order=2,
):
"""
Args:
            positions: Position(s) in the box whose neighborhood information is analysed
num_neighbors (int): Number of nearest neighbors
cutoff_radius (float): Upper bound of the distance to which the search is to be done
width_buffer (float): Width of the layer to be added to account for pbc.
mode (str): Representation of per-atom quantities (distances etc.). Choose from
'filled', 'ragged' and 'flattened'.
norm_order (int): Norm to use for the neighborhood search and shell recognition. The
definition follows the conventional Lp norm (cf.
                https://en.wikipedia.org/wiki/Lp_space). This is an experimental feature and for anything
other than norm_order=2, there is no guarantee that this works flawlessly.
Returns:
pyiron.atomistics.structure.atoms.Tree: Neighbors instances with the neighbor
indices, distances and vectors
"""
neigh = self._get_neighbors(
num_neighbors=num_neighbors,
cutoff_radius=cutoff_radius,
width_buffer=width_buffer,
get_tree=True,
norm_order=norm_order,
)
neigh._set_mode(mode)
return neigh._get_neighborhood(
positions=positions,
num_neighbors=num_neighbors,
cutoff_radius=cutoff_radius,
)
@deprecate("Use neigh.find_neighbors_by_vector() instead (after calling neigh = structure.get_neighbors())",
version="1.0.0")
def find_neighbors_by_vector(self, vector, return_deviation=False, num_neighbors=96):
neighbors = self.get_neighbors(num_neighbors=num_neighbors)
return neighbors.find_neighbors_by_vector(vector=vector, return_deviation=return_deviation)
find_neighbors_by_vector.__doc__ = Neighbors.find_neighbors_by_vector.__doc__
@deprecate("Use neigh.get_shell_matrix() instead (after calling neigh = structure.get_neighbors())",
version="1.0.0")
def get_shell_matrix(
self, id_list=None, chemical_pair=None, num_neighbors=100, tolerance=2,
cluster_by_distances=False, cluster_by_vecs=False
):
neigh_list = self.get_neighbors(
num_neighbors=num_neighbors, id_list=id_list, tolerance=tolerance
)
return neigh_list.get_shell_matrix(
chemical_pair=chemical_pair,
cluster_by_distances=cluster_by_distances,
cluster_by_vecs=cluster_by_vecs
)
get_shell_matrix.__doc__ = Neighbors.get_shell_matrix.__doc__
def occupy_lattice(self, **qwargs):
"""
Replaces specified indices with a given species
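        Example (illustrative): ``structure.occupy_lattice(Ni=[0, 1])`` places Ni on the
        first two sites; keyword names are element symbols and values are lists of atom indices.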
"""
new_species = list(np.array(self.species).copy())
new_indices = np.array(self.indices.copy())
for key, i_list in qwargs.items():
el = self._pse.element(key)
if el.Abbreviation not in [spec.Abbreviation for spec in new_species]:
new_species.append(el)
new_indices[i_list] = len(new_species) - 1
else:
index = np.argwhere(np.array(new_species) == el).flatten()
new_indices[i_list] = index
delete_species_indices = list()
retain_species_indices = list()
for i, el in enumerate(new_species):
if len(np.argwhere(new_indices == i).flatten()) == 0:
delete_species_indices.append(i)
else:
retain_species_indices.append(i)
for i in delete_species_indices:
new_indices[new_indices >= i] += -1
new_species = np.array(new_species)[retain_species_indices]
self.set_species(new_species)
self.indices = new_indices
@deprecate("Use neigh.cluster_analysis() instead (after calling neigh = structure.get_neighbors())",
version="1.0.0")
def cluster_analysis(
self, id_list, neighbors=None, radius=None, return_cluster_sizes=False
):
"""
Args:
id_list:
neighbors:
radius:
return_cluster_sizes:
Returns:
"""
if neighbors is None:
if radius is None:
neigh = self.get_neighbors(num_neighbors=100)
indices = np.unique(neigh.shells[0][neigh.shells[0]<=2], return_index=True)[1]
radius = neigh.distances[0][indices]
radius = np.mean(radius)
# print "radius: ", radius
neighbors = self.get_neighbors_by_distance(cutoff_radius=radius)
return neighbors.cluster_analysis(id_list=id_list, return_cluster_sizes=return_cluster_sizes)
# TODO: combine with corresponding routine in plot3d
@deprecate("Use neigh.get_bonds() instead (after calling neigh = structure.get_neighbors())",
version="1.0.0")
def get_bonds(self, radius=np.inf, max_shells=None, prec=0.1, num_neighbors=20):
"""
Args:
radius:
max_shells:
prec: minimum distance between any two clusters (if smaller considered to be single cluster)
num_neighbors:
Returns:
"""
neighbors = self.get_neighbors_by_distance(
cutoff_radius=radius, num_neighbors=num_neighbors
)
return neighbors.get_bonds(radius=radius, max_shells=max_shells, prec=prec)
def get_symmetry(
self, use_magmoms=False, use_elements=True, symprec=1e-5, angle_tolerance=-1.0
):
"""
Args:
use_magmoms (bool): Whether to consider magnetic moments (cf.
get_initial_magnetic_moments())
use_elements (bool): If False, chemical elements will be ignored
symprec (float): Symmetry search precision
angle_tolerance (float): Angle search tolerance
Returns:
symmetry (:class:`pyiron.atomistics.structure.symmetry.Symmetry`): Symmetry class
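        Example (illustrative sketch; spglib must be installed, the lattice constant is a placeholder):
            >>> basis = CrystalStructure("Al", bravais_lattice="cubic", bravais_basis="fcc", lattice_constants=[4.0])
            >>> sym = basis.get_symmetry()
            >>> spacegroup = sym.spacegroup  # spglib space group information, cf. get_spacegroup() below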
"""
return Symmetry(
self,
use_magmoms=use_magmoms,
use_elements=use_elements,
symprec=symprec,
angle_tolerance=angle_tolerance
)
@deprecate('Use structure.get_symmetry().symmetrize_vectors()')
def symmetrize_vectors(
self, vectors, use_magmoms=False, use_elements=True, symprec=1e-5, angle_tolerance=-1.0
):
"""
Symmetrization of natom x 3 vectors according to box symmetries
Args:
vectors (ndarray/list): natom x 3 array to symmetrize
use_magmoms (bool): cf. get_symmetry
use_elements (bool): cf. get_symmetry
symprec (float): cf. get_symmetry
angle_tolerance (float): cf. get_symmetry
Returns:
(np.ndarray) symmetrized vectors
"""
return self.get_symmetry(
use_magmoms=use_magmoms,
use_elements=use_elements,
symprec=symprec,
angle_tolerance=angle_tolerance
).symmetrize_vectors(vectors=vectors)
@deprecate('Use structure.get_symmetry().get_arg_equivalent_sites() instead')
def group_points_by_symmetry(
self, points, use_magmoms=False, use_elements=True, symprec=1e-5, angle_tolerance=-1.0
):
"""
This function classifies the points into groups according to the box symmetry given by
spglib.
Args:
points: (np.array/list) nx3 array which contains positions
use_magmoms (bool): Whether to consider magnetic moments (cf.
get_initial_magnetic_moments())
use_elements (bool): If False, chemical elements will be ignored
symprec (float): Symmetry search precision
angle_tolerance (float): Angle search tolerance
Returns: list of arrays containing geometrically equivalent positions
It is possible that the original points are not found in the returned list, as the
        positions outside the box will be projected back into the box.
"""
return self.get_symmetry(
use_magmoms=use_magmoms,
use_elements=use_elements,
symprec=symprec,
angle_tolerance=angle_tolerance
).get_arg_equivalent_sites(points)
@deprecate('Use structure.get_symmetry().get_arg_equivalent_sites() instead')
def get_equivalent_points(self, points, use_magmoms=False, use_elements=True, symprec=1e-5, angle_tolerance=-1.0):
"""
Args:
points (list/ndarray): 3d vector
use_magmoms (bool): cf. get_symmetry()
use_elements (bool): cf. get_symmetry()
symprec (float): cf. get_symmetry()
angle_tolerance (float): cf. get_symmetry()
Returns:
(ndarray): array of equivalent points with respect to box symmetries
"""
return self.get_symmetry(
use_magmoms=use_magmoms,
use_elements=use_elements,
symprec=symprec,
angle_tolerance=angle_tolerance
).get_arg_equivalent_sites(points)
@deprecate('Use structure.get_symmetry().info instead')
def get_symmetry_dataset(self, symprec=1e-5, angle_tolerance=-1.0):
"""
Args:
symprec:
angle_tolerance:
Returns:
https://atztogo.github.io/spglib/python-spglib.html
"""
return self.get_symmetry(symprec=symprec, angle_tolerance=angle_tolerance).info
@deprecate('Use structure.get_symmetry().spacegroup instead')
def get_spacegroup(self, symprec=1e-5, angle_tolerance=-1.0):
"""
Args:
symprec:
angle_tolerance:
Returns:
https://atztogo.github.io/spglib/python-spglib.html
"""
return self.get_symmetry(symprec=symprec, angle_tolerance=angle_tolerance).spacegroup
@deprecate('Use structure.get_symmetry().refine_cell() instead')
def refine_cell(self, symprec=1e-5, angle_tolerance=-1.0):
"""
Args:
symprec:
angle_tolerance:
Returns:
https://atztogo.github.io/spglib/python-spglib.html
"""
return self.get_symmetry(symprec=symprec, angle_tolerance=angle_tolerance).refine_cell()
@deprecate('Use structure.get_symmetry().primitive_cell instead')
def get_primitive_cell(self, symprec=1e-5, angle_tolerance=-1.0):
"""
Args:
symprec:
angle_tolerance:
Returns:
"""
return self.get_symmetry(symprec=symprec, angle_tolerance=angle_tolerance).primitive_cell
@deprecate('Use structure.get_symmetry().get_ir_reciprocal_mesh() instead')
def get_ir_reciprocal_mesh(
self,
mesh,
is_shift=np.zeros(3, dtype="intc"),
is_time_reversal=True,
symprec=1e-5,
):
"""
Args:
mesh:
is_shift:
is_time_reversal:
symprec:
Returns:
"""
return self.get_symmetry(symprec=symprec).get_ir_reciprocal_mesh(
mesh=mesh,
is_shift=is_shift,
is_time_reversal=is_time_reversal,
)
def get_majority_species(self, return_count=False):
"""
This function returns the majority species and their number in the box
Returns:
            dict: number of atoms of the majority species ("count"), its chemical symbol ("symbol") and its chemical index ("index")
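        Example (illustrative sketch):
            >>> basis = CrystalStructure("Al", bravais_lattice="cubic", bravais_basis="fcc", lattice_constants=[4.0])
            >>> majority = basis.get_majority_species()  # e.g. {"symbol": "Al", "count": 4, "index": 0}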
"""
el_dict = self.get_number_species_atoms()
el_num = list(el_dict.values())
el_name = list(el_dict.keys())
if np.sum(np.array(el_num) == np.max(el_num)) > 1:
warnings.warn("There are more than one majority species")
symbol_to_index = dict(
zip(self.get_chemical_symbols(), self.get_chemical_indices())
)
max_index = np.argmax(el_num)
return {
"symbol": el_name[max_index],
"count": int(np.max(el_num)),
"index": symbol_to_index[el_name[max_index]],
}
def close(self):
# TODO: implement
pass
@deprecate("Use Atoms.analyse.pyscal_voronoi_volume() instead")
def get_voronoi_volume(self):
return self._analyse.pyscal_voronoi_volume()
get_voronoi_volume.__doc__ = Analyse.pyscal_voronoi_volume.__doc__
def is_skewed(self, tolerance=1.0e-8):
"""
Check whether the simulation box is skewed/sheared. The algorithm compares the box volume
and the product of the box length in each direction. If these numbers do not match, the box
is considered to be skewed and the function returns True
Args:
tolerance (float): Relative tolerance above which the structure is considered as skewed
Returns:
(bool): Whether the box is skewed or not.
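        Example (illustrative sketch; the lattice constant is a placeholder value):
            >>> basis = CrystalStructure("Al", bravais_lattice="cubic", bravais_basis="fcc", lattice_constants=[4.0])
            >>> flag = basis.is_skewed()  # False for the cubic cell
            >>> sheared = basis.apply_strain([[0.0, 0.1, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], return_box=True)
            >>> flag = sheared.is_skewed()  # True: the shear makes the volume smaller than the edge-length product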
"""
volume = self.get_volume()
prod = np.linalg.norm(self.cell, axis=-1).prod()
if volume > 0:
if abs(volume-prod)/volume < tolerance:
return False
return True
def find_mic(self, v, vectors=True):
"""
Find vectors following minimum image convention (mic). In principle this
function does the same as ase.geometry.find_mic
Args:
v (list/numpy.ndarray): 3d vector or a list/array of 3d vectors
vectors (bool): Whether to return vectors (distances are returned if False)
Returns: numpy.ndarray of the same shape as input with mic
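        Example (illustrative sketch for a 4 Angstrom cubic cell with full periodic boundary conditions):
            >>> basis = CrystalStructure("Al", bravais_lattice="cubic", bravais_basis="fcc", lattice_constants=[4.0])
            >>> wrapped = basis.find_mic([3.9, 0.0, 0.0])                  # approximately [-0.1, 0.0, 0.0]
            >>> length = basis.find_mic([3.9, 0.0, 0.0], vectors=False)    # approximately 0.1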
"""
vecs = np.asarray(v).reshape(-1, 3)
if any(self.pbc):
vecs = np.einsum('ji,nj->ni', np.linalg.inv(self.cell), vecs)
vecs[:,self.pbc] -= np.rint(vecs)[:,self.pbc]
vecs = np.einsum('ji,nj->ni', self.cell, vecs)
if vectors:
return vecs.reshape(np.asarray(v).shape)
return np.linalg.norm(vecs, axis=-1).reshape(np.asarray(v).shape[:-1])
def get_distance(self, a0, a1, mic=True, vector=False):
"""
Return distance between two atoms.
Use mic=True to use the Minimum Image Convention.
vector=True gives the distance vector (from a0 to a1).
Args:
a0 (int/numpy.ndarray/list): position or atom ID
a1 (int/numpy.ndarray/list): position or atom ID
mic (bool): minimum image convention (True if periodic boundary conditions should be considered)
            vector (bool): If True, the vector connecting the two positions is returned instead of the distance
Returns:
float: distance or vectors in length unit
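        Example (illustrative sketch):
            >>> basis = CrystalStructure("Al", bravais_lattice="cubic", bravais_basis="fcc", lattice_constants=[4.0])
            >>> d = basis.get_distance(0, 1)                 # scalar mic distance between atoms 0 and 1
            >>> vec = basis.get_distance(0, 1, vector=True)  # mic vector pointing from atom 0 to atom 1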
"""
from ase.geometry import find_mic
positions = self.positions
if isinstance(a0, list) or isinstance(a0, np.ndarray):
if not (len(a0) == 3):
raise AssertionError()
a0 = np.array(a0)
else:
a0 = positions[a0]
if isinstance(a1, list) or isinstance(a1, np.ndarray):
if not (len(a1) == 3):
raise AssertionError()
a1 = np.array(a1)
else:
a1 = positions[a1]
distance = np.array([a1 - a0])
if mic:
distance, d_len = find_mic(distance, self.cell, self.pbc)
else:
d_len = np.array([np.sqrt((distance ** 2).sum())])
if vector:
return distance[0]
return d_len[0]
def get_distances_array(self, p1=None, p2=None, mic=True, vectors=False):
"""
Return distance matrix of every position in p1 with every position in
p2. If p2 is not set, it is assumed that distances between all
positions in p1 are desired. p2 will be set to p1 in this case. If both
p1 and p2 are not set, the distances between all atoms in the box are
returned.
Args:
p1 (numpy.ndarray/list): Nx3 array of positions
p2 (numpy.ndarray/list): Nx3 array of positions
mic (bool): minimum image convention
vectors (bool): return vectors instead of distances
Returns:
            numpy.ndarray: NxN if vectors=False and NxNx3 if vectors=True
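        Example (illustrative sketch; the fcc cubic cell below contains 4 atoms):
            >>> basis = CrystalStructure("Al", bravais_lattice="cubic", bravais_basis="fcc", lattice_constants=[4.0])
            >>> dmat = basis.get_distances_array()               # 4x4 matrix of mic distances
            >>> vmat = basis.get_distances_array(vectors=True)   # 4x4x3 array of mic distance vectors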
"""
if p1 is None and p2 is not None:
p1 = p2
p2 = None
if p1 is None:
p1 = self.positions
if p2 is None:
p2 = self.positions
p1 = np.asarray(p1)
p2 = np.asarray(p2)
diff_relative = p2.reshape(-1,3)[np.newaxis,:,:]-p1.reshape(-1,3)[:,np.newaxis,:]
diff_relative = diff_relative.reshape(p1.shape[:-1]+p2.shape[:-1]+(3,))
if not mic:
if vectors:
return diff_relative
else:
return np.linalg.norm(diff_relative, axis=-1)
return self.find_mic(diff_relative, vectors=vectors)
def append(self, atom):
if isinstance(atom, ASEAtom):
super(Atoms, self).append(atom=atom)
else:
new_atoms = atom.copy()
if new_atoms.pbc.all() and np.isclose(new_atoms.get_volume(), 0):
new_atoms.cell = self.cell
new_atoms.pbc = self.pbc
self += new_atoms
def extend(self, other):
"""
Extend atoms object by appending atoms from *other*. (Extending the ASE function)
Args:
other (pyiron_atomistics.atomistics.structure.atoms.Atoms/ase.atoms.Atoms): Structure to append
Returns:
pyiron.atomistics.structure.atoms.Atoms: The extended structure
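        Example (illustrative sketch; element choice and positions are placeholders):
            >>> basis = CrystalStructure("Cu", bravais_lattice="cubic", bravais_basis="fcc", lattice_constants=[3.6])
            >>> extra = Atoms(elements=["O"], positions=[[0.0, 0.0, 5.0]], cell=basis.cell, pbc=True)
            >>> basis = basis.extend(extra)  # equivalent to basis += extra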
"""
old_indices = self.indices
if isinstance(other, Atom):
other = self.__class__([other])
elif isinstance(other, ASEAtom):
other = self.__class__([ase_to_pyiron_atom(other)])
if not isinstance(other, Atoms) and isinstance(other, ASEAtoms):
warnings.warn("Converting ase structure to pyiron before appending the structure")
other = ase_to_pyiron(other)
new_indices = other.indices.copy()
super(Atoms, self).extend(other=other)
if isinstance(other, Atoms):
if not np.allclose(self.cell, other.cell):
warnings.warn("You are adding structures with different cell shapes. "
"Taking the cell and pbc of the first structure:{}".format(self.cell))
if not np.array_equal(self.pbc, other.pbc):
warnings.warn("You are adding structures with different periodic boundary conditions. "
"Taking the cell and pbc of the first structure:{}".format(self.cell))
sum_atoms = self
# sum_atoms = copy(self)
sum_atoms._tag_list = sum_atoms._tag_list + other._tag_list
sum_atoms.indices = np.append(sum_atoms.indices, other.indices)
new_species_lst = copy(sum_atoms.species)
ind_conv = {}
for ind_old, el in enumerate(other.species):
if el.Abbreviation in sum_atoms._store_elements.keys():
ind_new = sum_atoms._species_to_index_dict[
sum_atoms._store_elements[el.Abbreviation]
]
ind_conv[ind_old] = ind_new
else:
new_species_lst.append(el)
sum_atoms._store_elements[el.Abbreviation] = el
ind_conv[ind_old] = len(new_species_lst) - 1
for key, val in ind_conv.items():
new_indices[new_indices == key] = val + 1000
new_indices = np.mod(new_indices, 1000)
sum_atoms.indices[len(old_indices):] = new_indices
sum_atoms.set_species(new_species_lst)
if not len(set(sum_atoms.indices)) == len(sum_atoms.species):
raise ValueError("Adding the atom instances went wrong!")
return self
__iadd__ = extend
def __copy__(self):
"""
Copies the atoms object
Returns:
atoms_new: A copy of the object
"""
# Using ASE copy
atoms_new = super(Atoms, self).copy()
ase_keys = list(ASEAtoms().__dict__.keys())
ase_keys.append("_pse")
# Only copy the non ASE keys
for key, val in self.__dict__.items():
if key not in ase_keys:
atoms_new.__dict__[key] = copy(val)
atoms_new._visualize = Visualize(atoms_new)
atoms_new._analyse = Analyse(atoms_new)
return atoms_new
def __delitem__(self, key):
if isinstance(key, (int, np.integer)):
key = [key]
key = np.array(key).flatten()
new_length = len(self) - len(np.arange(len(self))[np.asarray(key)])
super(Atoms, self).__delitem__(key)
self.indices = np.delete(self.indices, key, axis=0)
del self._tag_list[key]
self._tag_list._length = new_length
deleted_species_indices = list()
retain_species_indices = list()
new_indices = self.indices.copy()
for i, el in enumerate(self.species):
if len(self.select_index(el)) == 0:
deleted_species_indices.append(i)
new_indices[new_indices >= i] += -1
else:
retain_species_indices.append(i)
new_species = np.array(self.species).copy()[retain_species_indices]
self.set_species(new_species)
self.indices = new_indices
def __eq__(self, other):
return super(Atoms, self).__eq__(other) and \
np.array_equal(self.get_chemical_symbols(), other.get_chemical_symbols())
def __ne__(self, other):
return not self == other
def __getitem__(self, item):
new_dict = dict()
if isinstance(item, int):
for key, value in self._tag_list.items():
if item < len(value):
if value[item] is not None:
new_dict[key] = value[item]
element = self.species[self.indices[item]]
index = item
position = self.positions[item]
return Atom(
element=element,
position=position,
pse=self._pse,
index=index,
atoms=self,
**new_dict
)
new_array = super(Atoms, self).__getitem__(item)
new_array.dimension = self.dimension
if isinstance(item, tuple):
item = list(item)
new_indices = self.indices[item].copy()
new_species_indices, new_proper_indices = np.unique(
new_indices, return_inverse=True
)
new_species = [self.species[ind] for ind in new_species_indices]
new_array.set_species(new_species)
new_array.indices = new_proper_indices
new_array._tag_list = self._tag_list[item]
# new_array._tag_list._length = self._tag_list._length
new_array._tag_list._length = len(new_array)
if isinstance(new_array, Atom):
natoms = len(self)
if item < -natoms or item >= natoms:
raise IndexError("Index out of range.")
new_array.index = item
return new_array
def __getattr__(self, item):
if item in self._tag_list.keys():
return self._tag_list._lists[item]
return object.__getattribute__(self, item)
def __dir__(self):
new_dir = super().__dir__()
for key in self._tag_list.keys():
new_dir.append(key)
return new_dir
# def __len__(self):
# return len(self.indices)
def __repr__(self):
return self.__str__()
def __str__(self):
if len(self) == 0:
return "[]"
out_str = ""
for el, pos in zip(self.get_chemical_symbols(), self.positions):
out_str += el + ": " + str(pos) + "\n"
if len(self.get_tags()) > 0:
tags = self.get_tags()
out_str += "tags: \n" # + ", ".join(tags) + "\n"
for tag in tags:
out_str += (
" " + str(tag) + ": " + self._tag_list[tag].__str__() + "\n"
)
if self.cell is not None:
out_str += "pbc: " + str(self.pbc) + "\n"
out_str += "cell: \n"
out_str += str(self.cell) + "\n"
return out_str
def __setitem__(self, key, value):
if isinstance(key, (int, np.integer)):
old_el = self.species[self.indices[key]]
if isinstance(value, str):
el = PeriodicTable().element(value)
elif isinstance(value, ChemicalElement):
el = value
else:
raise TypeError("value should either be a string or a ChemicalElement.")
if el != old_el:
new_species = np.array(self.species).copy()
if len(self.select_index(old_el)) == 1:
if el.Abbreviation not in [
spec.Abbreviation for spec in new_species
]:
new_species[self.indices[key]] = el
self.set_species(list(new_species))
else:
el_list = np.array([sp.Abbreviation for sp in new_species])
ind = np.argwhere(el_list == el.Abbreviation).flatten()[-1]
remove_index = self.indices[key]
new_species = list(new_species)
del new_species[remove_index]
self.indices[key] = ind
self.indices[self.indices > remove_index] -= 1
self.set_species(new_species)
else:
if el.Abbreviation not in [
spec.Abbreviation for spec in new_species
]:
new_species = list(new_species)
new_species.append(el)
self.set_species(new_species)
self.indices[key] = len(new_species) - 1
else:
el_list = np.array([sp.Abbreviation for sp in new_species])
ind = np.argwhere(el_list == el.Abbreviation).flatten()[-1]
self.indices[key] = ind
elif isinstance(key, slice) or isinstance(key, (list, tuple, np.ndarray)):
if not isinstance(key, slice):
if hasattr(key, "__len__"):
if len(key) == 0:
return
else:
# Generating the correct numpy array from a slice input
if key.start is None:
start_val = 0
elif key.start < 0:
start_val = key.start + len(self)
else:
start_val = key.start
if key.stop is None:
stop_val = len(self)
elif key.stop < 0:
stop_val = key.stop + len(self)
else:
stop_val = key.stop
if key.step is None:
step_val = 1
else:
step_val = key.step
key = np.arange(start_val, stop_val, step_val)
if isinstance(value, (str, int, np.integer)):
el = PeriodicTable().element(value)
elif isinstance(value, ChemicalElement):
el = value
else:
raise ValueError(
"The value assigned should be a string, integer or a ChemicalElement instance"
)
replace_list = list()
new_species = list(np.array(self.species).copy())
for sp in self.species:
replace_list.append(
np.array_equal(
np.sort(self.select_index(sp)),
np.sort(np.intersect1d(self.select_index(sp), key)),
)
)
if el.Abbreviation not in [spec.Abbreviation for spec in new_species]:
if not any(replace_list):
new_species.append(el)
self.set_species(new_species)
self.indices[key] = len(new_species) - 1
else:
replace_ind = np.where(replace_list)[0][0]
new_species[replace_ind] = el
if len(np.where(replace_list)[0]) > 1:
for ind in replace_list[1:]:
del new_species[ind]
self.set_species(new_species)
self.indices[key] = replace_ind
else:
el_list = np.array([sp.Abbreviation for sp in new_species])
ind = np.argwhere(el_list == el.Abbreviation).flatten()[-1]
if not any(replace_list):
self.set_species(new_species)
self.indices[key] = ind
else:
self.indices[key] = ind
delete_indices = list()
new_indices = self.indices.copy()
for i, rep in enumerate(replace_list):
if i != ind and rep:
delete_indices.append(i)
# del new_species[i]
new_indices[new_indices >= i] -= 1
self.indices = new_indices.copy()
new_species = np.array(new_species)[
np.setdiff1d(np.arange(len(new_species)), delete_indices)
].tolist()
self.set_species(new_species)
else:
raise NotImplementedError()
# For ASE compatibility
self.numbers = self.get_atomic_numbers()
__mul__ = repeat
def __imul__(self, vec):
if isinstance(vec, (int, np.integer)):
vec = [vec] * self.dimension
initial_length = len(self)
if not hasattr(vec, '__len__'):
raise ValueError('Box repetition must be an integer or a list/ndarray of integers and not', type(vec))
if len(vec) != self.dimension:
raise AssertionError('Dimension of box repetition not consistent: ', len(vec), '!=', self.dimension)
i_vec = np.array([vec[0], 1, 1])
if self.dimension > 1:
i_vec[1] = vec[1]
if self.dimension > 2:
i_vec[2] = vec[2]
if not self.dimension == 3:
raise NotImplementedError()
mx, my, mz = i_vec
# Our position repeat algorithm is faster than ASE (no nested loops)
nx_lst, ny_lst, nz_lst = np.arange(mx), np.arange(my), np.arange(mz)
positions = self.get_scaled_positions(wrap=False)
lat = np.array(np.meshgrid(nx_lst, ny_lst, nz_lst)).T.reshape(-1, 3)
lat_new = np.repeat(lat, len(positions), axis=0)
new_positions = np.tile(positions, (len(lat), 1)) + lat_new
new_positions /= np.array(i_vec)
self.set_cell((self.cell.T * np.array(vec)).T, scale_atoms=True)
# ASE compatibility
for name, a in self.arrays.items():
self.arrays[name] = np.tile(a, (np.product(vec),) + (1, ) * (len(a.shape) - 1))
self.arrays["positions"] = np.dot(new_positions, self.cell)
self.indices = np.tile(self.indices, len(lat))
self._tag_list._length = len(self)
scale = i_vec[0] * i_vec[1] * i_vec[2]
for tag in self._tag_list.keys():
self._tag_list[tag] *= scale
# Repeating ASE constraints
if self.constraints is not None:
self.constraints = [c.repeat(vec, initial_length) for c in self.constraints]
return self
@staticmethod
def convert_formula(elements):
"""
Convert a given chemical formula into a list of elements
Args:
            elements (str): A string of the required chemical formula (e.g. H2O)
Returns:
list: A list of elements corresponding to the formula
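        Example (illustrative sketch):
            >>> element_list = Atoms.convert_formula("H2O")  # -> ['H', 'H', 'O']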
"""
el_list = []
num_list = ""
for i, char in enumerate(elements):
is_last = i == len(elements) - 1
if len(num_list) > 0:
if (not char.isdigit()) or is_last:
el_fac = ast.literal_eval(num_list) * el_list[-1]
for el in el_fac[1:]:
el_list.append(el)
num_list = ""
if char.isupper():
el_list.append(char)
elif char.islower():
el_list[-1] += char
elif char.isdigit():
num_list += char
if len(num_list) > 0:
# print "num_list: ", el_list, num_list, el_list[-1], (not char.isdigit()) or is_last
if (not char.isdigit()) or is_last:
el_fac = ast.literal_eval(num_list) * [el_list[-1]]
# print "el_fac: ", el_fac
for el in el_fac[1:]:
el_list.append(el)
num_list = ""
return el_list
def get_constraint(self):
if "selective_dynamics" in self._tag_list._lists.keys():
from ase.constraints import FixAtoms
return FixAtoms(indices=[atom_ind for atom_ind in
range(len(self)) if not any(self.selective_dynamics[atom_ind])])
else:
return None
def set_constraint(self, constraint=None):
super(Atoms, self).set_constraint(constraint)
if constraint is not None:
if constraint.todict()["name"] != "FixAtoms":
raise ValueError("Only FixAtoms is supported as ASE compatible constraint.")
if "selective_dynamics" not in self._tag_list._lists.keys():
self.add_tag(selective_dynamics=None)
for atom_ind in range(len(self)):
if atom_ind in constraint.index:
self.selective_dynamics[atom_ind] = [False, False, False]
else:
self.selective_dynamics[atom_ind] = [True, True, True]
def apply_strain(self, epsilon, return_box=False):
"""
Apply a given strain on the structure
Args:
epsilon (float/list/ndarray): epsilon matrix. If a single number is set, the same strain
is applied in each direction. If a 3-dim vector is set, it
will be multiplied by a unit matrix.
return_box (bool): whether to return a box. If set to True, only the returned box will
have the desired strain and the original box will stay unchanged.
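        Example (illustrative sketch; the lattice constant is a placeholder value):
            >>> basis = CrystalStructure("Al", bravais_lattice="cubic", bravais_basis="fcc", lattice_constants=[4.0])
            >>> expanded = basis.apply_strain(0.01, return_box=True)  # 1 % isotropic expansion, basis itself unchanged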
"""
epsilon = np.array([epsilon]).flatten()
if len(epsilon) == 3 or len(epsilon) == 1:
epsilon = epsilon*np.eye(3)
epsilon = epsilon.reshape(3, 3)
if epsilon.min() < -1.0:
raise ValueError("Strain value too negative")
if return_box:
structure_copy = self.copy()
else:
structure_copy = self
cell = structure_copy.cell.copy()
cell = np.matmul(epsilon + np.eye(3), cell)
structure_copy.set_cell(cell, scale_atoms=True)
if return_box:
return structure_copy
def get_spherical_coordinates(self, x=None):
"""
Args:
x (list/ndarray): coordinates to transform. If not set, the positions
in structure will be returned.
Returns:
array in spherical coordinates
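        Example (illustrative sketch):
            >>> basis = CrystalStructure("Al", bravais_lattice="cubic", bravais_basis="fcc", lattice_constants=[4.0])
            >>> r_theta_phi = basis.get_spherical_coordinates()  # one (r, theta, phi) triple per atom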
"""
if x is None:
x = self.positions.copy()
x = np.array(x).reshape(-1, 3)
r = np.linalg.norm(x, axis=-1)
phi = np.arctan2(x[:,2], x[:,1])
theta = np.arctan2(np.linalg.norm(x[:,:2], axis=-1), x[:,2])
return np.stack((r, theta, phi), axis=-1)
def get_initial_magnetic_moments(self):
"""
Get array of initial magnetic moments.
Returns:
numpy.array()
"""
if "spin" in self._tag_list._lists.keys():
return np.asarray(self.spin.list())
else:
spin_lst = [
element.tags["spin"] if "spin" in element.tags.keys() else None
for element in self.get_chemical_elements()
]
if any(spin_lst):
if (
isinstance(spin_lst, str)
or (
isinstance(spin_lst, (list, np.ndarray))
and isinstance(spin_lst[0], str)
)
) and "[" in list(set(spin_lst))[0]:
return np.array(
[
[
float(spin_dir)
for spin_dir in spin.replace("[", "")
.replace("]", "")
.replace(",", "")
.split()
]
if spin
else [0.0, 0.0, 0.0]
for spin in spin_lst
]
)
elif isinstance(spin_lst, (list, np.ndarray)):
return np.array(spin_lst)
else:
return np.array([float(spin) if spin else 0.0 for spin in spin_lst])
else:
return np.array([None] * len(self))
def set_initial_magnetic_moments(self, magmoms=None):
"""
Set array of initial magnetic moments.
Args:
            magmoms (numpy.ndarray/list): List of magnetic moments
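        Example (illustrative sketch, collinear spins; lattice constant and moment are placeholder values):
            >>> basis = CrystalStructure("Fe", bravais_lattice="cubic", bravais_basis="bcc", lattice_constants=[2.85])
            >>> basis.set_initial_magnetic_moments([2.2] * len(basis))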
"""
# pyiron part
if magmoms is not None:
            if len(magmoms) != len(self):
                raise ValueError("magmoms must contain exactly one entry per atom.")
if "spin" not in self._tag_list._lists.keys():
self.add_tag(spin=None)
for ind, spin in enumerate(magmoms):
self.spin[ind] = spin
self.spins = magmoms
def rotate(self, a=0.0, v=None, center=(0, 0, 0), rotate_cell=False, index_list=None
):
"""
Rotate atoms based on a vector and an angle, or two vectors. This function is completely adopted from ASE code
(https://wiki.fysik.dtu.dk/ase/_modules/ase/atoms.html#Atoms.rotate)
Args:
a (float/list) in degrees = None:
                Angle by which the atoms are rotated around the vector 'v'. If an angle
is not specified, the length of 'v' is used as the angle
(default). The angle can also be a vector and then 'v' is rotated
into 'a'.
v (list/numpy.ndarray/string):
Vector to rotate the atoms around. Vectors can be given as
strings: 'x', '-x', 'y', ... .
center (tuple/list/numpy.ndarray/str): The center is kept fixed under the rotation. Use 'COM' to fix
the center of mass, 'COP' to fix the center of positions or
'COU' to fix the center of cell.
rotate_cell = False:
If true the cell is also rotated.
index_list (list/numpy.ndarray):
Indices of atoms to be rotated
Examples:
Rotate 90 degrees around the z-axis, so that the x-axis is
rotated into the y-axis:
>>> atoms = Atoms()
>>> atoms.rotate(90, 'z')
>>> atoms.rotate(90, (0, 0, 1))
>>> atoms.rotate(-90, '-z')
>>> atoms.rotate('x', 'y')
>>> atoms.rotate((1, 0, 0), (0, 1, 0))
"""
if index_list is None:
super(Atoms, self).rotate(a=a, v=v, center=center, rotate_cell=rotate_cell)
else:
dummy_basis = copy(self)
dummy_basis.rotate(a=a, v=v, center=center, rotate_cell=rotate_cell)
self.positions[index_list] = dummy_basis.positions[index_list]
def to_ase(self):
return pyiron_to_ase(self)
def to_pymatgen(self):
return pyiron_to_pymatgen(self)
def to_ovito(self):
return pyiron_to_ovito(self)
class _CrystalStructure(Atoms):
"""
only for historical reasons
Args:
element:
BravaisLattice:
BravaisBasis:
LatticeConstants:
Dimension:
relCoords:
PSE:
**kwargs:
"""
def __init__(
self,
element="Fe",
bravais_lattice="cubic",
bravais_basis="primitive",
lattice_constants=None, # depending on symmetry length and angles
dimension=3,
rel_coords=True,
pse=None,
**kwargs
):
# print "basis0"
# allow also for scalar input for LatticeConstants (for a cubic system)
if lattice_constants is None:
lattice_constants = [1.0]
try:
test = lattice_constants[0]
except (TypeError, IndexError):
lattice_constants = [lattice_constants]
self.bravais_lattice = bravais_lattice
self.bravais_basis = bravais_basis
self.lattice_constants = lattice_constants
self.dimension = dimension
self.relCoords = rel_coords
self.element = element
self.__updateCrystal__(pse)
self.crystalParamsDict = {
"BravaisLattice": self.bravais_lattice,
"BravaisBasis": self.bravais_basis,
"LatticeConstants": self.lattice_constants,
}
self.crystal_lattice_dict = {
3: {
"cubic": ["fcc", "bcc", "primitive"],
"hexagonal": ["primitive", "hcp"],
"monoclinic": ["primitive", "base-centered"],
"triclinic": ["primitive"],
"orthorombic": [
"primitive",
"body-centered",
"base-centered",
"face-centered",
],
"tetragonal": ["primitive", "body-centered"],
"rhombohedral": ["primitive"],
},
2: {
"oblique": ["primitive"],
"rectangular": ["primitive", "centered"],
"hexagonal": ["primitive"],
"square": ["primitive"],
},
1: {"line": ["primitive"]},
}
# init structure for lattice parameters alat, blat, clat, alpha, beta, gamma
self.crystalLatticeParams = {
3: {
"cubic": [1.0],
"hexagonal": [1.0, 2.0],
"monoclinic": [1.0, 1.0, 1.0, 90.0],
"triclinic": [1.0, 2.0, 3.0, 90.0, 90.0, 90.0],
"orthorombic": [1.0, 1.0, 1.0],
"tetragonal": [1.0, 2.0],
"rhombohedral": [1.0, 90.0, 90.0, 90.0],
},
2: {
"oblique": [1.0, 1.0, 90.0],
"rectangular": [1.0, 1.0],
"hexagonal": [1.0],
"square": [1.0],
},
1: {"line": [1.0]},
}
# print "basis"
super(_CrystalStructure, self).__init__(
elements=self.ElementList,
scaled_positions=self.coordinates,
cell=self.amat, # tag = "Crystal",
pbc=[True, True, True][0 : self.dimension],
)
# ## private member functions
def __updateCrystal__(self, pse=None):
"""
Args:
pse:
Returns:
"""
self.__updateAmat__()
self.__updateCoordinates__()
self.__updateElementList__(pse)
def __updateAmat__(self): # TODO: avoid multi-call of this function
"""
Returns:
"""
# print "lat constants (__updateAmat__):", self.LatticeConstants
a_lat = self.lattice_constants[0]
if self.dimension == 3:
alpha = None
beta = None
gamma = None
b_lat, c_lat = None, None
if self.bravais_lattice == "cubic":
b_lat = c_lat = a_lat
alpha = beta = gamma = 90 / 180.0 * np.pi # 90 degrees
elif self.bravais_lattice == "tetragonal":
b_lat = a_lat
c_lat = self.lattice_constants[1]
alpha = beta = gamma = 0.5 * np.pi # 90 degrees
elif self.bravais_lattice == "triclinic":
b_lat = self.lattice_constants[1]
c_lat = self.lattice_constants[2]
alpha = self.lattice_constants[3] / 180.0 * np.pi
beta = self.lattice_constants[4] / 180.0 * np.pi
gamma = self.lattice_constants[5] / 180.0 * np.pi
elif self.bravais_lattice == "hexagonal":
b_lat = a_lat
c_lat = self.lattice_constants[1]
alpha = 60.0 / 180.0 * np.pi # 60 degrees
beta = gamma = 0.5 * np.pi # 90 degrees
elif self.bravais_lattice == "orthorombic":
b_lat = self.lattice_constants[1]
c_lat = self.lattice_constants[2]
alpha = beta = gamma = 0.5 * np.pi # 90 degrees
elif self.bravais_lattice == "rhombohedral":
b_lat = a_lat
c_lat = a_lat
alpha = self.lattice_constants[1] / 180.0 * np.pi
beta = self.lattice_constants[2] / 180.0 * np.pi
gamma = self.lattice_constants[3] / 180.0 * np.pi
elif self.bravais_lattice == "monoclinic":
b_lat = self.lattice_constants[1]
c_lat = self.lattice_constants[2]
alpha = 0.5 * np.pi
beta = self.lattice_constants[3] / 180.0 * np.pi
gamma = 0.5 * np.pi
b1 = np.cos(alpha)
b2 = np.sin(alpha)
c1 = np.cos(beta)
c2 = (np.cos(gamma) - np.cos(beta) * np.cos(alpha)) / np.sin(alpha)
self.amat = np.array(
[
[a_lat, 0.0, 0.0],
[b_lat * b1, b_lat * b2, 0.0],
[c_lat * c1, c_lat * c2, c_lat * np.sqrt(1 - c2 * c2 - c1 * c1)],
]
)
elif self.dimension == 2: # TODO not finished yet
self.amat = a_lat * np.array([[1.0, 0.0], [0.0, 1.0]])
if self.bravais_lattice == "rectangular":
b_lat = self.lattice_constants[1]
self.amat = np.array([[a_lat, 0.0], [0.0, b_lat]])
elif self.dimension == 1:
self.amat = a_lat * np.array([[1.0]])
else:
raise ValueError("Bravais lattice not defined!")
def __updateElementList__(self, pse=None):
"""
Args:
pse:
Returns:
"""
self.ElementList = len(self.coordinates) * [self.element]
def __updateCoordinates__(self):
"""
Returns:
"""
# if relative coordinates
basis = None
if self.dimension == 3:
if self.bravais_basis == "fcc" or self.bravais_basis == "face-centered":
basis = np.array(
[[0.0, 0.0, 0.0], [0.5, 0.5, 0.0], [0.5, 0.0, 0.5], [0.0, 0.5, 0.5]]
)
elif self.bravais_basis == "body-centered" or self.bravais_basis == "bcc":
basis = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
elif self.bravais_basis == "base-centered":
basis = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.0]])
elif self.bravais_basis == "hcp":
# basis = r([[0.0,-1./np.sqrt(3.),np.sqrt(8./3.)]])
# a = self.LatticeConstants[0]
# c = self.LatticeConstants[1]
basis = np.array([[0.0, 0.0, 0.0], [1.0 / 3.0, 1.0 / 3.0, 1.0 / 2.0]])
# basis = np.dot(basis,np.linalg.inv(self.amat))
elif self.bravais_basis == "primitive":
basis = np.array([[0.0, 0.0, 0.0]])
else:
raise ValueError(
"Only fcc, bcc, hcp, base-centered, body-centered and primitive cells are supported for 3D."
)
elif self.dimension == 2:
if self.bravais_basis == "primitive":
basis = np.array([[0.0, 0.0]])
elif self.bravais_basis == "centered":
basis = np.array([[0.0, 0.0], [0.5, 0.5]])
else:
raise ValueError(
"Only centered and primitive cells are supported for 2D."
)
elif self.dimension == 1:
if self.bravais_basis == "primitive":
basis = np.array([[0.0]])
else:
raise ValueError(
"Only primitive cells are supported for 1D."
)
self.coordinates = basis
    # ########################### get commands ########################
def get_lattice_types(self):
"""
Returns:
"""
        return sorted(self.crystal_lattice_dict[self.dimension].keys())
def get_dimension_of_lattice_parameters(self):
"""
Returns:
"""
# print "getDimensionOfLatticeParameters"
counter = 0
for k in self.get_needed_lattice_parameters():
if k:
counter += 1
return counter
def get_needed_lattice_parameters(self):
"""
Returns:
"""
# print "call: getNeededLatticeParams"
needed_params = [True, False, False, False, False, False]
if self.dimension == 3:
if self.bravais_lattice == "cubic":
needed_params = [
True,
False,
False,
False,
False,
False,
] # stands for alat, blat, clat, alpha, beta, gamma
elif self.bravais_lattice == "triclinic":
needed_params = [True, True, True, True, True, True]
elif self.bravais_lattice == "monoclinic":
needed_params = [True, True, True, True, False, False]
elif self.bravais_lattice == "orthorombic":
needed_params = [True, True, True, False, False, False]
elif self.bravais_lattice == "tetragonal":
needed_params = [True, False, True, False, False, False]
elif self.bravais_lattice == "rhombohedral":
needed_params = [True, False, False, True, True, True]
elif self.bravais_lattice == "hexagonal":
needed_params = [True, False, True, False, False, False]
elif self.dimension == 2:
if self.bravais_lattice == "oblique":
needed_params = [True, True, False, True, False, False]
elif self.bravais_lattice == "rectangular":
needed_params = [True, True, False, False, False, False]
elif self.bravais_lattice == "hexagonal":
needed_params = [True, False, False, False, False, False]
elif self.bravais_lattice == "square":
needed_params = [True, False, False, False, False, False]
else: # TODO: need to be improved
needed_params = [True, False, False, False, False, False]
elif self.dimension == 1:
if self.bravais_lattice == "line":
needed_params = [True, False, False, False, False, False]
            else:  # TODO: improvement needed
needed_params = [True, False, False, False, False, False]
else:
raise ValueError("inconsistency in lattice structures")
return needed_params
def get_basis_types(self):
"""
Returns:
"""
        return sorted(self.crystal_lattice_dict[self.dimension].get(self.bravais_lattice))
def get_initial_lattice_constants(self):
"""
Returns:
"""
        return sorted(self.crystalLatticeParams[self.dimension].get(self.bravais_lattice))
# def getDimension(self):
# return self.dimension
# def getCoordinates(self):
# return self.coordinates
# def getCell(self):
# return self.amat
def get_atom_structure(self, rel=True):
"""
Args:
rel:
Returns:
"""
# print self.relCoords, self.amat
return Atoms(
elementList=self.ElementList,
coordinates=self.coordinates,
amat=self.amat,
tag="Crystal",
rel=rel, # self.relCoords, #rel, # true or false # coordinates are given in relative lattice units
pbc=[True, True, True][0 : self.dimension],
Crystal=self.crystalParamsDict,
)
# #################### set commands #########################
def set_lattice_constants(self, lattice_constants=None):
"""
Args:
lattice_constants:
Returns:
"""
if lattice_constants is None:
lattice_constants = [1.0]
for k in lattice_constants:
if k <= 0:
raise ValueError("negative lattice parameter(s)")
self.lattice_constants = lattice_constants
self.__updateCrystal__()
def set_element(self, element="Fe"):
"""
Args:
element:
Returns:
"""
self.element = element
self.__updateCrystal__()
def set_dimension(self, dim=3):
"""
Args:
dim:
Returns:
"""
self.dimension = dim
length = self.get_dimension_of_lattice_parameters()
if dim == 3: # # initial 3d structure
self.lattice_constants = length * [1.0]
self.bravais_lattice = "cubic"
self.bravais_basis = "primitive"
elif dim == 2: # # initial 2d structure
self.lattice_constants = length * [1.0]
self.bravais_lattice = "square"
self.bravais_basis = "primitive"
elif dim == 1: # # initial 1d structure
self.lattice_constants = length * [1.0]
self.bravais_lattice = "line"
self.bravais_basis = "primitive"
self.__updateCrystal__()
def set_lattice_type(self, name_lattice="cubic"):
"""
Args:
name_lattice:
Returns:
"""
# catch input error
# print "lattice type =", name_lattice
if name_lattice not in self.get_lattice_types():
raise ValueError("is not item of ")
else:
self.bravais_lattice = name_lattice
self.set_lattice_constants(
self.get_dimension_of_lattice_parameters() * [1.0]
)
self.set_basis_type(
name_basis=self.crystal_lattice_dict[self.dimension].get(name_lattice)[
0
]
) # initial basis type
self.__updateCrystal__()
def set_basis_type(self, name_basis="primitive"):
"""
Args:
name_basis:
Returns:
"""
if name_basis not in self.get_basis_types():
raise ValueError("is not item of")
else:
self.bravais_basis = name_basis
self.__updateCrystal__()
def atoms(self):
"""
Returns:
"""
return Atoms(
elements=self.ElementList,
scaled_positions=self.coordinates,
cell=self.amat,
pbc=[True, True, True][0 : self.dimension],
)
class CrystalStructure(object):
def __new__(cls, *args, **kwargs):
basis = _CrystalStructure(*args, **kwargs).atoms()
return basis
def ase_to_pyiron(ase_obj):
"""
Convert an ase.atoms.Atoms structure object to its equivalent pyiron structure
Args:
ase_obj(ase.atoms.Atoms): The ase atoms instance to convert
Returns:
pyiron.atomistics.structure.atoms.Atoms: The equivalent pyiron structure
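    Example (illustrative sketch; assumes ase is installed, lattice constant is a placeholder):
        >>> from ase.build import bulk
        >>> pyiron_structure = ase_to_pyiron(bulk("Cu", "fcc", a=3.6))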
"""
element_list = ase_obj.get_chemical_symbols()
cell = ase_obj.cell
positions = ase_obj.get_positions()
pbc = ase_obj.get_pbc()
spins = ase_obj.get_initial_magnetic_moments()
if all(spins == np.array(None)) or sum(np.abs(spins)) == 0.0:
pyiron_atoms = Atoms(
elements=element_list, positions=positions, pbc=pbc, cell=cell
)
else:
if any(spins == np.array(None)):
spins[spins == np.array(None)] = 0.0
pyiron_atoms = Atoms(
elements=element_list,
positions=positions,
pbc=pbc,
cell=cell,
magmoms=spins,
)
if hasattr(ase_obj, "constraints") and len(ase_obj.constraints) != 0:
for constraint in ase_obj.constraints:
constraint_dict = constraint.todict()
if constraint_dict["name"] == "FixAtoms":
if "selective_dynamics" not in pyiron_atoms._tag_list.keys():
pyiron_atoms.add_tag(selective_dynamics=[True, True, True])
pyiron_atoms.selective_dynamics[
constraint_dict["kwargs"]["indices"]
] = [False, False, False]
elif constraint_dict["name"] == "FixScaled":
if "selective_dynamics" not in pyiron_atoms._tag_list.keys():
pyiron_atoms.add_tag(selective_dynamics=[True, True, True])
pyiron_atoms.selective_dynamics[
constraint_dict["kwargs"]["a"]
] = constraint_dict["kwargs"]["mask"]
else:
warnings.warn("Unsupported ASE constraint: " + constraint_dict["name"])
return pyiron_atoms
def pyiron_to_ase(pyiron_obj):
element_list = pyiron_obj.get_parent_symbols()
cell = pyiron_obj.cell
positions = pyiron_obj.positions
pbc = pyiron_obj.get_pbc()
spins = pyiron_obj.get_initial_magnetic_moments()
if all(spins == np.array(None)) or sum(np.abs(spins)) == 0.0:
atoms = ASEAtoms(symbols=element_list, positions=positions, pbc=pbc, cell=cell)
else:
if any(spins == np.array(None)):
spins[spins == np.array(None)] = 0.0
atoms = ASEAtoms(
symbols=element_list, positions=positions, pbc=pbc, cell=cell, magmoms=spins
)
return atoms
def _check_if_simple_atoms(atoms):
"""
Raise a warning if the ASE atoms object includes properties which can not be converted to pymatgen atoms.
Args:
atoms: ASE atoms object
"""
dict_keys = [
k
for k in atoms.__dict__.keys()
if k
not in ["_celldisp", "arrays", "_cell", "_pbc", "_constraints", "info", "_calc"]
]
array_keys = [
k for k in atoms.__dict__["arrays"].keys() if k not in ["numbers", "positions"]
]
if not len(dict_keys) == 0:
warnings.warn("Found unknown keys: " + str(dict_keys))
if not np.all(atoms.__dict__["_celldisp"] == np.array([[0.0], [0.0], [0.0]])):
warnings.warn("Found cell displacement: " + str(atoms.__dict__["_celldisp"]))
if not atoms.__dict__["_calc"] is None:
warnings.warn("Found calculator: " + str(atoms.__dict__["_calc"]))
if not atoms.__dict__["_constraints"] == []:
warnings.warn("Found constraint: " + str(atoms.__dict__["_constraints"]))
if not np.all(atoms.__dict__["_pbc"]):
warnings.warn("Cell is not periodic: " + str(atoms.__dict__["_pbc"]))
if not len(array_keys) == 0:
warnings.warn("Found unknown flags: " + str(array_keys))
if not atoms.__dict__["info"] == dict():
warnings.warn("Info is not empty: " + str(atoms.__dict__["info"]))
def pymatgen_to_pyiron(pymatgen_obj):
"""
Convert pymatgen atoms object to pyiron atoms object (pymatgen->ASE->pyiron)
Args:
pymatgen_obj: pymatgen atoms object
Returns:
pyiron atoms object
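    Example (illustrative sketch; assumes pymatgen is installed, lattice constant is a placeholder):
        >>> from pymatgen.core import Structure, Lattice
        >>> pmg_structure = Structure(Lattice.cubic(3.6), ["Cu"], [[0.0, 0.0, 0.0]])
        >>> pyiron_structure = pymatgen_to_pyiron(pmg_structure)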
"""
return ase_to_pyiron(AseAtomsAdaptor().get_atoms(structure=pymatgen_obj))
def pyiron_to_pymatgen(pyiron_obj):
"""
Convert pyiron atoms object to pymatgen atoms object
Args:
pyiron_obj: pyiron atoms object
Returns:
pymatgen atoms object
"""
ase_atoms = pyiron_to_ase(pyiron_obj)
_check_if_simple_atoms(atoms=ase_atoms)
return AseAtomsAdaptor().get_structure(atoms=ase_atoms, cls=None)
def ovito_to_pyiron(ovito_obj):
"""
Args:
ovito_obj:
Returns:
"""
try:
from ovito.data import ase_to_pyiron
return ase_to_pyiron(ovito_obj.to_ase_atoms())
except ImportError:
raise ValueError("ovito package not yet installed")
def pyiron_to_ovito(atoms):
"""
Args:
atoms:
Returns:
"""
try:
from ovito.data import DataCollection
return DataCollection.create_from_ase_atoms(atoms)
except ImportError:
raise ValueError("ovito package not yet installed")
def string2symbols(s):
"""
Convert string to list of chemical symbols.
Args:
s:
Returns:
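    Example (illustrative sketch):
        >>> symbols = string2symbols("CO2")     # -> ['C', 'O', 'O']
        >>> symbols = string2symbols("(H2O)2")  # -> ['H', 'H', 'O', 'H', 'H', 'O']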
"""
i = None
n = len(s)
if n == 0:
return []
c = s[0]
if c.isdigit():
i = 1
while i < n and s[i].isdigit():
i += 1
return int(s[:i]) * string2symbols(s[i:])
if c == "(":
p = 0
for i, c in enumerate(s):
if c == "(":
p += 1
elif c == ")":
p -= 1
if p == 0:
break
j = i + 1
while j < n and s[j].isdigit():
j += 1
if j > i + 1:
m = int(s[i + 1 : j])
else:
m = 1
return m * string2symbols(s[1:i]) + string2symbols(s[j:])
if c.isupper():
i = 1
if 1 < n and s[1].islower():
i += 1
j = i
while j < n and s[j].isdigit():
j += 1
if j > i:
m = int(s[i:j])
else:
m = 1
return m * [s[:i]] + string2symbols(s[j:])
else:
raise ValueError
def symbols2numbers(symbols):
"""
Args:
symbols (list, str):
Returns:
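    Example (illustrative sketch):
        >>> numbers = symbols2numbers("H2O")  # -> [1, 1, 8]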
"""
pse = PeriodicTable()
df = pse.dataframe.T
if isinstance(symbols, str):
symbols = string2symbols(symbols)
numbers = list()
for sym in symbols:
if isinstance(sym, str):
numbers.append(df[sym]["AtomicNumber"])
else:
numbers.append(sym)
return numbers
def string2vector(v):
"""
Args:
v:
Returns:
"""
if isinstance(v, str):
if v[0] == "-":
return -string2vector(v[1:])
w = np.zeros(3)
w["xyz".index(v)] = 1.0
return w
return np.array(v, float)
def default(data, dflt):
"""
Helper function for setting default values.
Args:
data:
dflt:
Returns:
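    Example (illustrative sketch):
        >>> values = default([1.0, None, 3.0], 0.0)  # -> [1.0, 0.0, 3.0]
        >>> values = default(None, 0.0)              # -> None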
"""
if data is None:
return None
elif isinstance(data, (list, tuple)):
newdata = []
allnone = True
for x in data:
if x is None:
newdata.append(dflt)
else:
newdata.append(x)
allnone = False
if allnone:
return None
return newdata
else:
return data
class Symbols(ASESymbols):
"""
Derived from the ase symbols class which has the following docs:
Args:
        numbers (list/numpy.ndarray): List of atomic numbers
"""
def __init__(self, numbers):
self.__doc__ = self.__doc__ + "\n" + super().__doc__
super().__init__(numbers)
self._structure = None
@property
def structure(self):
"""
        The structure to which the symbols are assigned
Returns:
pyiron_atomistics.atomistics.structure.atoms.Atoms: The required structure
"""
return self._structure
@structure.setter
def structure(self, val):
self._structure = val
def __setitem__(self, key, value):
super().__setitem__(key, value)
if self._structure is not None:
index_array = np.argwhere(self.numbers != self._structure.get_atomic_numbers()).flatten()
replace_elements = self.structure.numbers_to_elements(self.numbers[index_array])
for i, el in enumerate(replace_elements):
self._structure[index_array[i]] = el
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.analysis.residuals Contains the ResidualAnalyser class
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from .component import AnalysisComponent
from ...core.tools import filesystem as fs
from ...core.tools.logging import log
from ...magic.core.frame import Frame
from ...magic.plot.imagegrid import ResidualImageGridPlotter
from ...magic.basics.skyregion import SkyRegion
# -----------------------------------------------------------------
class ResidualAnalyser(AnalysisComponent):
"""
This class...
"""
def __init__(self, config=None):
"""
The constructor ...
:param config:
:return:
"""
# Call the constructor of the base class
super(ResidualAnalyser, self).__init__(config)
# -- Attributes --
# The simulated images
self.simulated = dict()
# The observed images
self.observed = dict()
# The residual images
self.residuals = dict()
# The truncation ellipse
self.ellipse = None
# -----------------------------------------------------------------
@classmethod
def from_arguments(cls, arguments):
"""
This function ...
:param arguments:
:return:
"""
# Create a new ResidualAnalyser instance
analyser = cls()
# Set the modeling path
analyser.config.path = arguments.path
# Return the new instance
return analyser
# -----------------------------------------------------------------
def run(self):
"""
This function ...
:return:
"""
# 1. Call the setup function
self.setup()
# 2. Load the simulated images
self.load_simulated_images()
# 3. Load the observed images
self.load_observed_images()
# 4. Rebin the images to the same pixel grid
self.rebin()
# Load the truncation ellipse
self.load_truncation_ellipse()
# 5. Calculate the residual images
#self.calculate_residuals()
# 6. Writing
#self.write()
# 7. Plotting
self.plot()
# -----------------------------------------------------------------
def load_simulated_images(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the simulated images ...")
# Loop over all FITS files found in the analysis/misc directory
for path, name in fs.files_in_path(self.analysis_misc_path, extension="fits", returns=["path", "name"], contains="__"):
# Debugging
log.debug("Loading the '" + name + "' image ...")
# Get the filter name
filter_name = name.split("__")[1]
# Open the image
frame = Frame.from_file(path)
# Add the image frame to the dictionary
self.simulated[filter_name] = frame
# -----------------------------------------------------------------
def load_observed_images(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the observed images ...")
# Loop over all FITS files found in the 'truncated' directory
for path, name in fs.files_in_path(self.truncation_path, extension="fits", returns=["path", "name"]):
# Ignore the bulge, disk and model images
if name == "bulge" or name == "disk" or name == "model": continue
# Ignore the H alpha image
if "Halpha" in name: continue
# Check whether a simulated image exists for this band
if name not in self.simulated:
log.warning("The simulated version of the " + name + " image could not be found, skipping " + name + " data ...")
continue
# Debugging
log.debug("Loading the '" + name + "' image ...")
# The filter name is the image name
filter_name = name
# Open the image
frame = Frame.from_file(path)
# Add the image frame to the dictionary
self.observed[filter_name] = frame
# -----------------------------------------------------------------
def rebin(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Rebinning the observed and simulated images to the same resolution ...")
# Loop over the filter names
for filter_name in self.filter_names:
simulated = self.simulated[filter_name]
observed = self.observed[filter_name]
# Check whether the coordinate systems of the observed and simulated image match
if simulated.wcs == observed.wcs:
# Debugging
log.debug("The coordinate system of the simulated and observed image for the " + filter_name + " filter matches")
continue
# Debugging
log.debug("The coordinate system of the simulated and observed image for the " + filter_name + " does not match: rebinning the simulated image ...")
# Rebin the simulated image to the coordinate system of the observed image
simulated_rebinned = simulated.rebinned(observed.wcs)
# Replace the simulated frame by the rebinned frame
self.simulated[filter_name] = simulated_rebinned
# -----------------------------------------------------------------
def load_truncation_ellipse(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the ellipse region used for truncating the observed images ...")
# Determine the path
path = fs.join(self.truncation_path, "ellipse.reg")
# Get the ellipse
region = SkyRegion.from_file(path)
self.ellipse = region[0]
# -----------------------------------------------------------------
def calculate_residuals(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Calculating the residual images ...")
# Loop over the filter names
for filter_name in self.filter_names:
simulated = self.simulated[filter_name]
observed = self.observed[filter_name]
# Calculate the residual image
residual = (simulated - observed) / observed
#residual.replace_infs(0.0)
# Add the residual image to the dictionary
self.residuals[filter_name] = residual
# -----------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing ...")
# Write the residual frames
self.write_residuals()
# -----------------------------------------------------------------
def write_residuals(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the residual frames ...")
# Loop over the residual frames
for filter_name in self.residuals:
# Determine the path for this residual image
path = fs.join(self.analysis_residuals_path, filter_name + ".fits")
# Debugging
log.debug("Writing the residual frame for the " + filter_name + " band to '" + path + "' ...")
# Write the image
self.residuals[filter_name].save(path)
# -----------------------------------------------------------------
def plot(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting ...")
# Plot a grid with the observed, simulated and residual images
self.plot_image_grid()
# -----------------------------------------------------------------
def plot_image_grid(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting a grid with the observed, simulated and residual images ...")
# Create the image grid plotter
plotter = ResidualImageGridPlotter(title="Image residuals")
# Loop over the filter names, add a row to the image grid plotter for each filter
for filter_name in self.filter_names_sorted:
observed = self.observed[filter_name]
simulated = self.simulated[filter_name]
plotter.add_row(observed, simulated, filter_name)
# Set the bounding box for the plotter
plotter.set_bounding_box(self.ellipse.bounding_box)
# Determine the path to the plot file
path = fs.join(self.analysis_residuals_path, "residuals.pdf")
# Run the plotter
plotter.run(path)
# -----------------------------------------------------------------
@property
def filter_names(self):
"""
This function ...
:return:
"""
        # Get the filter names which appear in both the simulated and observed images
        filter_names = list(set(self.simulated.keys()) & set(self.observed.keys()))
        return filter_names
# -----------------------------------------------------------------
@property
def filter_names_sorted(self):
"""
This function returns a list of the filter names, sorted on wavelength
:return:
"""
return sorted(self.filter_names, key=lambda key: self.observed[key].filter.pivotwavelength())
# -----------------------------------------------------------------
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.testing_utils import (
assert_op_count_match,
assert_model_is_valid,
get_op_types_in_program,
apply_pass_and_basic_check,
)
import pytest
import numpy as np
import itertools
np.random.seed(1984)
class TestElementwiseOptimizationPasses:
"""
Input graph:
Const
|
V
input -----> convolution -----> add/sub ----> relu ---> out
Output graph:
input -----> convolution -----> relu ----> out
"""
@pytest.mark.parametrize(
"conv_dim, \
flip_add_input_order, \
add_batch_dim_to_const, \
use_sub_instead, \
prebuilt_bias, \
scalar_elementwise, \
use_conv_transpose",
itertools.product(
[
2,
3,
], # 1D conv conversion broken even without the pass: rdar://problem/62960720
[True, False], # flip_add_input_order
[True, False], # add_batch_dim_to_const
[True, False], # use_sub_instead
[True, False], # prebuilt_bias
[True, False], # scalar_elementwise
[True, False], # use_conv_transpose
),
)
def test_fuse_conv_bias(
self,
conv_dim,
flip_add_input_order,
add_batch_dim_to_const,
use_sub_instead,
prebuilt_bias,
scalar_elementwise,
use_conv_transpose,
):
if flip_add_input_order and use_sub_instead:
return
if use_conv_transpose and conv_dim != 2:
return
input_shape = None
W = None
Cout = 8
Cin = 3
D = 10
const = (
np.random.rand(Cout) if add_batch_dim_to_const else np.random.rand(1, Cout)
)
const = np.expand_dims(const, axis=-1)
if conv_dim == 1:
input_shape = (1, Cin, D)
W = np.random.rand(Cout, Cin, 1)
elif conv_dim == 2:
input_shape = (1, Cin, D, D)
W = np.random.rand(Cout, Cin, 1, 1)
const = np.expand_dims(const, axis=-1)
elif conv_dim == 3:
input_shape = (1, Cin, D, D, D)
W = np.random.rand(Cout, Cin, 1, 1, 1)
const = np.expand_dims(const, axis=-1)
const = np.expand_dims(const, axis=-1)
if use_conv_transpose:
W = np.swapaxes(W, 0, 1)
output_shape = list(input_shape)
output_shape[1] = Cout
if scalar_elementwise:
const = np.random.uniform(0)
@mb.program(input_specs=[mb.TensorSpec(shape=input_shape)])
def prog(x):
kwargs = {
"x": x,
"weight": W,
"pad_type": "valid",
"dilations": [1] * conv_dim,
"strides": [1] * conv_dim,
}
if prebuilt_bias:
kwargs["bias"] = np.random.rand(Cout)
x = mb.conv_transpose(**kwargs) if use_conv_transpose else mb.conv(**kwargs)
if use_sub_instead:
x = mb.sub(x=x, y=const)
else:
x = mb.add(
x=const if flip_add_input_order else x,
y=x if flip_add_input_order else const,
)
x = mb.relu(x=x)
return x
element_op = "sub" if use_sub_instead else "add"
conv_op = "conv" if not use_conv_transpose else "conv_transpose"
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::fuse_conv_bias"
)
assert get_op_types_in_program(prev_prog) == [conv_op, element_op, "relu"]
assert get_op_types_in_program(prog) == [conv_op, "relu"]
old_bias = prev_block.find_ops(op_type=conv_op)[0].inputs.get("bias", None)
old_bias_val = 0 if old_bias is None else old_bias.val
assert old_bias_val is not None
assert block.find_ops(op_type=conv_op)[0].inputs["bias"] is not None
new_bias_val = block.find_ops(op_type=conv_op)[0].inputs["bias"].val
assert new_bias_val is not None
if use_sub_instead:
np.testing.assert_almost_equal(
old_bias_val - np.squeeze(const), new_bias_val
)
else:
np.testing.assert_almost_equal(
old_bias_val + np.squeeze(const), new_bias_val
)
assert_model_is_valid(
prog,
{"x": input_shape},
expected_output_shapes={block.outputs[0].name: tuple(output_shape)},
)
"""
Input graph:
Const
|
V
input -----> convolution -----> transpose -----> add/sub ---> out
Output graph:
input -----> convolution -----> transpose -----> out
"""
@pytest.mark.parametrize(
"conv_dim, has_bias, is_sub, is_conv_first_input, is_bias_scalar, is_deconv, is_all_1s",
itertools.product(
[1, 2, 3], # conv_dim
[True, False], # has_bias
[True, False], # is_sub
[True, False], # is_conv_first_input
[True, False], # is_bias_scalar
[True, False], # is_deconv
[True, False], # is_all_1s
),
)
def test_fuse_conv_bias_transpose_pattern(
self,
conv_dim,
has_bias,
is_sub,
is_conv_first_input,
is_bias_scalar,
is_deconv,
is_all_1s,
):
if is_all_1s and is_bias_scalar:
return
# construct the conv weight/bias
input_shape = None
Cout = 8
Cin = 3
D = 10
conv_weight = None
conv_bias = np.arange(Cout).astype(np.float32) if has_bias else np.zeros(Cout).astype(np.float32)
rank = conv_dim + 2
if conv_dim == 1:
input_shape = (1, Cin, D)
conv_weight = np.random.rand(Cout, Cin, 1)
elif conv_dim == 2:
input_shape = (1, Cin, D, D)
conv_weight = np.random.rand(Cout, Cin, 1, 1)
elif conv_dim == 3:
input_shape = (1, Cin, D, D, D)
conv_weight = np.random.rand(Cout, Cin, 1, 1, 1)
if is_deconv:
conv_weight = np.swapaxes(conv_weight, 0, 1)
output_shape = list(input_shape)
output_shape[1] = Cout
output_shape = np.array(output_shape)
        # generate the perm for the transpose op
perm = np.arange(rank)
np.random.shuffle(perm)
output_shape = output_shape[perm]
cout_index = np.where(perm == 1)[0][0]
        # generate the const bias, and reshape it to a random broadcastable shape
bias = np.arange(Cout).astype(np.float32)
bias_shape = [1] * rank
bias_shape[cout_index] = Cout
if cout_index != 0:
crop_index = np.random.randint(low=0, high=cout_index + 1)
bias_shape = bias_shape[crop_index:]
bias = np.reshape(bias, bias_shape)
# for the scalar case, randomly generate a number
if is_bias_scalar:
bias = np.random.uniform(0)
# for the all 1s case, randomly generate a number and reshape it to (1, 1, ..., 1)
if is_all_1s:
bias = np.array([np.random.uniform(0)])
bias_rank = np.random.randint(low=1, high=rank+1)
bias_shape = [1] * bias_rank
bias = np.reshape(bias, bias_shape)
@mb.program(input_specs=[mb.TensorSpec(shape=input_shape)])
def prog(x):
# conv or conv_transpose
kwargs = {
"x": x,
"weight": conv_weight,
"pad_type": "valid",
"dilations": [1] * conv_dim,
"strides": [1] * conv_dim,
}
if has_bias:
kwargs["bias"] = conv_bias
x = mb.conv_transpose(**kwargs) if is_deconv else mb.conv(**kwargs)
# transpose
x = mb.transpose(x=x, perm=perm)
# elementwise op
element_args = {"x": x, "y": bias} if is_conv_first_input else {"x": bias, "y": x}
element_op = mb.sub if is_sub else mb.add
x = element_op(**element_args)
return x
element_op = "sub" if is_sub else "add"
conv_op = "conv" if not is_deconv else "conv_transpose"
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::fuse_conv_bias"
)
assert get_op_types_in_program(prev_prog) == [conv_op, "transpose", element_op]
assert get_op_types_in_program(prog) == [conv_op, "transpose"]
# get the value of new weight/bias
new_bias_val = block.find_ops(op_type=conv_op)[0].inputs["bias"].val
assert new_bias_val is not None
new_weight_val = block.find_ops(op_type=conv_op)[0].inputs["weight"].val
assert new_weight_val is not None
# compare the weight
if is_sub and not is_conv_first_input:
np.testing.assert_almost_equal(new_weight_val, -conv_weight)
else:
np.testing.assert_almost_equal(new_weight_val, conv_weight)
# compare the bias
if is_sub:
if is_conv_first_input:
bias = -bias
else:
conv_bias = -conv_bias
expected_conv_bias_val = conv_bias + np.squeeze(bias)
np.testing.assert_almost_equal(expected_conv_bias_val, new_bias_val)
# run the model
assert_model_is_valid(
prog,
{"x": input_shape},
expected_output_shapes={block.outputs[0].name: tuple(output_shape)},
)
"""
Input graph:
Const Const
| |
V V
input -----> transpose -----> mul ----> add ---> out
Output graph:
input -----> transpose -----> batchnorm ----> out
"""
@pytest.mark.parametrize(
"flip_mul_input_order, flip_add_input_order, rank_3_const_input",
itertools.product([False, True], [False, True], [False, True]),
)
def test_mul_add_fusion_to_batchnorm(
self, flip_mul_input_order, flip_add_input_order, rank_3_const_input
):
C = 3
gamma = np.random.rand(1, C, 1, 1)
beta = np.random.rand(1, C, 1, 1)
if rank_3_const_input:
gamma = np.squeeze(gamma, axis=0)
beta = np.squeeze(beta, axis=0)
@mb.program(input_specs=[mb.TensorSpec(shape=(1, 10, 10, C))])
def prog(x):
x = mb.transpose(x=x, perm=[0, 3, 1, 2])
if flip_mul_input_order:
x = mb.mul(x=gamma, y=x)
else:
x = mb.mul(x=x, y=gamma)
if flip_add_input_order:
x = mb.add(x=beta, y=x)
else:
x = mb.add(x=x, y=beta)
return x
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::fuse_elementwise_to_batchnorm"
)
assert get_op_types_in_program(prev_prog) == ["transpose", "mul", "add"]
assert get_op_types_in_program(prog) == ["transpose", "batch_norm"]
assert_model_is_valid(
prog,
{"x": (1, 10, 10, C)},
expected_output_shapes={block.outputs[0].name: (1, C, 10, 10)},
)
|
import json
import mock
import pytest
from click.testing import CliRunner
from gradient.api_sdk import sdk_exceptions
from gradient.api_sdk.clients.http_client import default_headers
from gradient.cli import cli
from tests import MockResponse, example_responses
EXPECTED_HEADERS = default_headers.copy()
EXPECTED_HEADERS["ps_client_name"] = "gradient-cli"
EXPECTED_HEADERS_WITH_CHANGED_API_KEY = EXPECTED_HEADERS.copy()
EXPECTED_HEADERS_WITH_CHANGED_API_KEY["X-API-Key"] = "some_key"
@pytest.fixture
def basic_options_metrics_stream_websocket_connection_iterator():
def generator(self):
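# Yields a few fake metric frames, then raises to terminate the simulated
# websocket stream used by these tests.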
yield """{"handle":"nrwed38p","object_type":"notebook","chart_name":"memoryUsage",
"pod_metrics":{"nrwed38p":{"time_stamp":1588066152,"value":"54013952"}}}"""
yield """{"handle":"nrwed38p","object_type":"notebook","chart_name":"cpuPercentage",
"pod_metrics":{"nrwed38p":{"time_stamp":1588066152,"value":"0.006907773333334353"}}}"""
yield """{"handle":"nrwed38p","object_type":"notebook","chart_name":"memoryUsage",
"pod_metrics":{"nrwed38p":{"time_stamp":1588066155,"value":"12345667"}}}"""
raise sdk_exceptions.GradientSdkError()
return generator
@pytest.fixture
def all_options_metrics_stream_websocket_connection_iterator():
def generator(self):
yield """{"handle":"nrwed38p","object_type":"notebook","chart_name":"gpuMemoryFree",
"pod_metrics":{"nrwed38p":{"time_stamp":1588068626,"value":"1234"}}}"""
yield """{"handle":"nrwed38p","object_type":"notebook","chart_name":"gpuMemoryUsed",
"pod_metrics":{"nrwed38p":{"time_stamp":1588068646,"value":"32"}}}"""
yield """{"handle":"nrwed38p","object_type":"notebook","chart_name":"gpuMemoryFree",
"pod_metrics":{"nrwed38p":{"time_stamp":1588068646,"value":"2345"}}}"""
raise sdk_exceptions.GradientSdkError()
return generator
class TestNotebooksCreate(object):
URL = "https://api.paperspace.io/notebooks/v2/createNotebook"
COMMAND = [
"notebooks",
"create",
"--machineType", "P5000",
"--container", "jupyter/notebook",
"--clusterId", "321"
]
EXPECTED_REQUEST_JSON = {
"vmTypeLabel": "P5000",
"containerName": "jupyter/notebook",
"clusterId": "321",
'isPreemptible': False,
'isPublic': False,
}
EXPECTED_RESPONSE_JSON = {
"handle": "some_id",
"notebookToken": None,
"jobId": 20163,
"isPublic": False,
"id": 1811,
"containerName": "jupyter/notebook",
}
EXPECTED_STDOUT = "Created new notebook with id: some_id\n" \
"https://www.paperspace.com/some_namespace/notebook/prg284tu2\n"
COMMAND_WITH_API_KEY_USED = [
"notebooks",
"create",
"--machineType", "P5000",
"--container", "jupyter/notebook",
"--clusterId", "321",
"--apiKey", "some_key",
]
COMMAND_WITH_ALL_OPTIONS = [
"notebooks",
"create",
"--machineType", "P5000",
"--container", "jupyter/notebook",
"--clusterId", "321",
"--name", "some_notebook_name",
"--registryUsername", "some_username",
"--registryPassword", "some_password",
"--command", "some_entrypoint",
"--containerUser", "some_container_user",
"--shutdownTimeout", "8",
"--isPreemptible",
]
EXPECTED_REQUEST_JSON_WITH_ALL_OPTIONS = {
"vmTypeLabel": "P5000",
"containerName": "jupyter/notebook",
"clusterId": "321",
"name": "some_notebook_name",
"registryUsername": "some_username",
"registryPassword": "some_password",
"defaultEntrypoint": "c29tZV9lbnRyeXBvaW50",
"containerUser": "some_container_user",
"shutdownTimeout": 8,
"isPreemptible": True,
"isPublic": False,
}
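# Note: "defaultEntrypoint" above ("c29tZV9lbnRyeXBvaW50") is the base64 encoding
# of "some_entrypoint", i.e. the value passed with --command.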
COMMAND_WITH_OPTIONS_FILE_USED = ["notebooks", "create", "--optionsFile", ] # path added in test
RESPONSE_JSON_WITH_WRONG_API_TOKEN = {"status": 400, "message": "Invalid API token"}
EXPECTED_STDOUT_WITH_WRONG_API_TOKEN = "Failed to create resource: Invalid API token\n"
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_send_post_request_and_print_notebook_id(self, post_patched, get_patched):
post_patched.return_value = MockResponse(self.EXPECTED_RESPONSE_JSON)
get_patched.return_value = MockResponse(example_responses.NOTEBOOK_GET_RESPONSE)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND)
assert result.output == self.EXPECTED_STDOUT, result.exc_info
post_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_send_changed_headers_when_api_key_option_was_used(self, post_patched, get_patched):
post_patched.return_value = MockResponse(self.EXPECTED_RESPONSE_JSON)
get_patched.return_value = MockResponse(example_responses.NOTEBOOK_GET_RESPONSE)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND_WITH_API_KEY_USED)
assert result.output == self.EXPECTED_STDOUT, result.exc_info
post_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_send_post_request_and_print_notebook_id_when_all_options_were_used(self, post_patched, get_patched):
post_patched.return_value = MockResponse(self.EXPECTED_RESPONSE_JSON)
get_patched.return_value = MockResponse(example_responses.NOTEBOOK_GET_RESPONSE)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND_WITH_ALL_OPTIONS)
assert result.output == self.EXPECTED_STDOUT, result.exc_info
post_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.EXPECTED_REQUEST_JSON_WITH_ALL_OPTIONS,
data=None,
files=None,
params=None)
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_read_option_from_yaml_file(self, post_patched, get_patched, notebooks_create_config_path):
post_patched.return_value = MockResponse(self.EXPECTED_RESPONSE_JSON)
get_patched.return_value = MockResponse(example_responses.NOTEBOOK_GET_RESPONSE)
command = self.COMMAND_WITH_OPTIONS_FILE_USED[:] + [notebooks_create_config_path]
runner = CliRunner()
result = runner.invoke(cli.cli, command)
assert result.output == self.EXPECTED_STDOUT, result.exc_info
post_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
json=self.EXPECTED_REQUEST_JSON_WITH_ALL_OPTIONS,
data=None,
files=None,
params=None)
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_print_valid_error_message_when_command_was_used_with_invalid_api_token(self, post_patched):
post_patched.return_value = MockResponse(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400)
cli_runner = CliRunner()
result = cli_runner.invoke(cli.cli, self.COMMAND)
assert result.output == self.EXPECTED_STDOUT_WITH_WRONG_API_TOKEN, result.exc_info
post_patched.assert_called_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
assert result.exit_code == 0
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_print_valid_error_message_when_no_content_was_received_in_response(self, post_patched):
post_patched.return_value = MockResponse(status_code=400)
cli_runner = CliRunner()
result = cli_runner.invoke(cli.cli, self.COMMAND)
assert result.output == "Failed to create resource\n", result.exc_info
post_patched.assert_called_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
assert result.exit_code == 0
# TODO: Add test case for creating notebook with tag
class TestNotebooksFork(object):
URL = "https://api.paperspace.io/notebooks/v2/forkNotebook"
COMMAND = [
"notebooks",
"fork",
"--id", "n1234",
]
EXPECTED_REQUEST_JSON = {
"notebookId": "n1234",
}
EXPECTED_RESPONSE_JSON = {
"handle": "n1234",
"notebookToken": None,
"jobId": 20163,
"isPublic": False,
"id": 1811,
}
EXPECTED_STDOUT = "Notebook forked to id: n1234\n"
COMMAND_WITH_API_KEY_USED = [
"notebooks",
"fork",
"--id", "n1234",
"--apiKey", "some_key",
]
RESPONSE_JSON_WITH_WRONG_API_TOKEN = {"status": 400, "message": "Invalid API token"}
EXPECTED_STDOUT_WITH_WRONG_API_TOKEN = "Failed to fork notebook: Invalid API token\n"
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_send_post_request_and_print_notebook_id(self, post_patched):
post_patched.return_value = MockResponse(self.EXPECTED_RESPONSE_JSON)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND)
assert result.output == self.EXPECTED_STDOUT, result.exc_info
post_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_send_changed_headers_when_api_key_option_was_used(self, post_patched):
post_patched.return_value = MockResponse(self.EXPECTED_RESPONSE_JSON)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND_WITH_API_KEY_USED)
assert result.output == self.EXPECTED_STDOUT, result.exc_info
post_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_print_valid_error_message_when_command_was_used_with_invalid_api_token(self, post_patched):
post_patched.return_value = MockResponse(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400)
cli_runner = CliRunner()
result = cli_runner.invoke(cli.cli, self.COMMAND)
assert result.output == self.EXPECTED_STDOUT_WITH_WRONG_API_TOKEN, result.exc_info
post_patched.assert_called_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
assert result.exit_code == 0
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_print_valid_error_message_when_no_content_was_received_in_response(self, post_patched):
post_patched.return_value = MockResponse(status_code=400)
cli_runner = CliRunner()
result = cli_runner.invoke(cli.cli, self.COMMAND)
assert result.output == "Failed to fork notebook\n", result.exc_info
post_patched.assert_called_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
assert result.exit_code == 0
class TestNotebooksStart(object):
URL = "https://api.paperspace.io/notebooks/v2/startNotebook"
COMMAND = [
"notebooks",
"start",
"--id", "n123",
"--machineType", "c5.xlarge",
"--clusterId", "cl123",
]
EXPECTED_REQUEST_JSON = {
"notebookId": "n123",
"vmTypeLabel": "c5.xlarge",
"clusterId": "cl123",
"isPreemptible": False,
}
EXPECTED_RESPONSE_JSON = {
"handle": "n123",
"notebookToken": None,
"jobId": 20163,
"isPublic": False,
"id": 1811,
"containerId": 123,
}
EXPECTED_STDOUT = "Started notebook with id: n123\n"
COMMAND_WITH_API_KEY_USED = [
"notebooks",
"start",
"--id", "n123",
"--machineType", "c5.xlarge",
"--clusterId", "cl123",
"--apiKey", "some_key",
]
RESPONSE_JSON_WITH_WRONG_API_TOKEN = {"status": 400, "message": "Invalid API token"}
EXPECTED_STDOUT_WITH_WRONG_API_TOKEN = "Failed to create resource: Invalid API token\n"
EXPECTED_STDOUT_WITH_KEY = "Started notebook with id: n123\n" \
"https://www.paperspace.com/some_namespace/notebook/prg284tu2\n"
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_send_changed_headers_when_api_key_option_was_used(self, post_patched, get_patched):
post_patched.return_value = MockResponse(self.EXPECTED_RESPONSE_JSON)
get_patched.return_value = MockResponse(example_responses.NOTEBOOK_GET_RESPONSE)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND_WITH_API_KEY_USED)
assert result.output == self.EXPECTED_STDOUT_WITH_KEY, result.exc_info
post_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_print_valid_error_message_when_command_was_used_with_invalid_api_token(self, post_patched):
post_patched.return_value = MockResponse(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400)
cli_runner = CliRunner()
result = cli_runner.invoke(cli.cli, self.COMMAND)
assert result.output == self.EXPECTED_STDOUT_WITH_WRONG_API_TOKEN, result.exc_info
post_patched.assert_called_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
assert result.exit_code == 0
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_print_valid_error_message_when_no_content_was_received_in_response(self, post_patched):
post_patched.return_value = MockResponse(status_code=400)
cli_runner = CliRunner()
result = cli_runner.invoke(cli.cli, self.COMMAND)
assert result.output == "Failed to create resource\n", result.exc_info
post_patched.assert_called_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
assert result.exit_code == 0
class TestNotebooksStop(object):
URL = "https://api.paperspace.io/notebooks/v2/stopNotebook"
COMMAND = [
"notebooks",
"stop",
"--id", "n123",
]
EXPECTED_REQUEST_JSON = {
"notebookId": 'n123',
}
EXPECTED_STDOUT = "Stopping notebook with id: n123\n"
COMMAND_WITH_API_KEY_USED = [
"notebooks",
"stop",
"--id", "n123",
"--apiKey", "some_key",
]
RESPONSE_JSON_WITH_WRONG_API_TOKEN = {"status": 400, "message": "Invalid API token"}
EXPECTED_STDOUT_WITH_WRONG_API_TOKEN = "Unable to stop instance: Invalid API token\n"
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_send_post_request_and_print_notebook_id(self, post_patched):
post_patched.return_value = MockResponse(example_responses.NOTEBOOK_GET_RESPONSE)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND)
assert result.output == self.EXPECTED_STDOUT, result.exc_info
post_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_send_changed_headers_when_api_key_option_was_used(self, post_patched, get_patched):
post_patched.return_value = MockResponse(example_responses.NOTEBOOK_GET_RESPONSE)
get_patched.return_value = MockResponse(example_responses.NOTEBOOK_GET_RESPONSE)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND_WITH_API_KEY_USED)
assert result.output == self.EXPECTED_STDOUT, result.exc_info
post_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_print_valid_error_message_when_command_was_used_with_invalid_api_token(self, post_patched):
post_patched.return_value = MockResponse(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400)
cli_runner = CliRunner()
result = cli_runner.invoke(cli.cli, self.COMMAND)
assert result.output == self.EXPECTED_STDOUT_WITH_WRONG_API_TOKEN, result.exc_info
post_patched.assert_called_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
assert result.exit_code == 0
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_print_valid_error_message_when_no_content_was_received_in_response(self, post_patched):
post_patched.return_value = MockResponse(status_code=400)
cli_runner = CliRunner()
result = cli_runner.invoke(cli.cli, self.COMMAND)
assert result.output == "Unable to stop instance\n", result.exc_info
post_patched.assert_called_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
assert result.exit_code == 0
class TestListNotebookArtifacts(object):
runner = CliRunner()
URL = "https://api.paperspace.io/notebooks/artifactsList"
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_send_valid_get_request_with_all_parameters_for_a_list_of_artifacts(self, get_patched):
get_patched.return_value = MockResponse()
notebook_id = "some_notebook_id"
result = self.runner.invoke(cli.cli,
["notebooks", "artifacts", "list", "--id", notebook_id, "--apiKey", "some_key", "--size",
"--links",
"--files", "foo"])
get_patched.assert_called_with(self.URL,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
json=None,
params={"notebookId": notebook_id,
"size": True,
"links": True,
"files": "foo"})
assert result.exit_code == 0
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
@pytest.mark.parametrize('option,param', [("--size", "size"),
("-s", "size"),
("--links", "links"),
("-l", "links")])
def test_should_send_valid_get_request_with_valid_param_for_a_list_of_artifacts_for_both_formats_of_param(self,
get_patched,
option,
param):
get_patched.return_value = MockResponse(status_code=200)
notebook_id = "some_notebook_id"
result = self.runner.invoke(cli.cli,
["notebooks", "artifacts", "list", "--id", notebook_id, "--apiKey", "some_key"] + [option])
get_patched.assert_called_with(self.URL,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
json=None,
params={"notebookId": notebook_id,
param: True})
assert result.exit_code == 0
class TestNotebooksDelete(object):
URL = "https://api.paperspace.io/notebooks/v2/deleteNotebook"
COMMAND = [
"notebooks",
"delete",
"--id", "some_id",
]
EXPECTED_REQUEST_JSON = {"notebookId": "some_id"}
EXPECTED_STDOUT = "Notebook deleted\n"
COMMAND_WITH_API_KEY_USED = [
"notebooks",
"delete",
"--id", "some_id",
"--apiKey", "some_key",
]
COMMAND_WITH_OPTIONS_FILE_USED = ["notebooks", "delete", "--optionsFile", ] # path added in test
RESPONSE_JSON_WITH_WRONG_API_TOKEN = {"status": 400, "message": "Invalid API token"}
EXPECTED_STDOUT_WITH_WRONG_API_TOKEN = "Failed to delete resource: Invalid API token\n"
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_send_post_request_and_print_notebook_id(self, post_patched):
post_patched.return_value = MockResponse(status_code=204)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND)
assert result.output == self.EXPECTED_STDOUT, result.exc_info
post_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_send_changed_headers_when_api_key_option_was_used(self, post_patched):
post_patched.return_value = MockResponse(status_code=204)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND_WITH_API_KEY_USED)
assert result.output == self.EXPECTED_STDOUT, result.exc_info
post_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_read_option_from_yaml_file(self, post_patched, notebooks_delete_config_path):
post_patched.return_value = MockResponse(status_code=204)
command = self.COMMAND_WITH_OPTIONS_FILE_USED[:] + [notebooks_delete_config_path]
runner = CliRunner()
result = runner.invoke(cli.cli, command)
assert result.output == self.EXPECTED_STDOUT, result.exc_info
post_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_print_valid_error_message_when_command_was_used_with_invalid_api_token(self, get_patched):
get_patched.return_value = MockResponse(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400)
cli_runner = CliRunner()
result = cli_runner.invoke(cli.cli, self.COMMAND)
assert result.output == self.EXPECTED_STDOUT_WITH_WRONG_API_TOKEN, result.exc_info
get_patched.assert_called_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
assert result.exit_code == 0
@mock.patch("gradient.api_sdk.clients.http_client.requests.post")
def test_should_print_valid_error_message_when_no_content_was_received_in_response(self, get_patched):
get_patched.return_value = MockResponse(status_code=400)
cli_runner = CliRunner()
result = cli_runner.invoke(cli.cli, self.COMMAND)
assert result.output == "Failed to delete resource\n", result.exc_info
get_patched.assert_called_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.EXPECTED_REQUEST_JSON,
data=None,
files=None,
params=None)
assert result.exit_code == 0
class TestNotebooksDetails(object):
URL = "https://api.paperspace.io/notebooks/getNotebook"
COMMAND = ["notebooks", "details", "--id", "some_id"]
EXPECTED_STDOUT = """+---------+-----------------------------------+
| Name | some_name |
+---------+-----------------------------------+
| ID | ngw7piq9 |
| VM Type | K80 |
| State | Running |
| FQDN | ngw7piq9.dgradient.paperspace.com |
| Tags | |
+---------+-----------------------------------+
"""
EXPECTED_STDOUT_WITH_TAGS = """+---------+-----------------------------------+
| Name | some_name |
+---------+-----------------------------------+
| ID | ngw7piq9 |
| VM Type | K80 |
| State | Running |
| FQDN | ngw7piq9.dgradient.paperspace.com |
| Tags | tag1, tag2 |
+---------+-----------------------------------+
"""
RESPONSE_JSON = example_responses.NOTEBOOK_GET_RESPONSE
RESPONSE_JSON_WITH_TAGS = example_responses.NOTEBOOK_GET_RESPONSE_WITH_TAGS
COMMAND_WITH_API_KEY_USED = ["notebooks", "details", "--id", "some_id", "--apiKey", "some_key"]
COMMAND_WITH_OPTIONS_FILE_USED = ["notebooks", "details", "--optionsFile", ] # path added in test
RESPONSE_JSON_WITH_WRONG_API_TOKEN = {"status": 400, "message": "Invalid API token"}
EXPECTED_STDOUT_WITH_WRONG_API_TOKEN = "Failed to fetch data: Invalid API token\n"
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_send_post_request_and_print_notebook_details(self, post_patched):
post_patched.return_value = MockResponse(self.RESPONSE_JSON)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND)
assert result.output == self.EXPECTED_STDOUT, result.exc_info
post_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS,
json={"notebookId": "some_id"},
params=None)
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_send_post_request_and_print_notebook_details_with_tags(self, post_patched):
post_patched.return_value = MockResponse(self.RESPONSE_JSON_WITH_TAGS)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND)
assert result.output == self.EXPECTED_STDOUT_WITH_TAGS, result.exc_info
post_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS,
json={"notebookId": "some_id"},
params=None)
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_send_changed_headers_when_api_key_option_was_used(self, post_patched):
post_patched.return_value = MockResponse(self.RESPONSE_JSON)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND_WITH_API_KEY_USED)
assert result.output == self.EXPECTED_STDOUT, result.exc_info
post_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
json={"notebookId": "some_id"},
params=None)
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_read_option_from_yaml_file(self, post_patched, notebooks_show_config_path):
post_patched.return_value = MockResponse(self.RESPONSE_JSON)
command = self.COMMAND_WITH_OPTIONS_FILE_USED[:] + [notebooks_show_config_path]
runner = CliRunner()
result = runner.invoke(cli.cli, command)
assert result.output == self.EXPECTED_STDOUT, result.exc_info
post_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
json={"notebookId": "some_id"},
params=None)
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_print_valid_error_message_when_command_was_used_with_invalid_api_token(self, get_patched):
get_patched.return_value = MockResponse(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400)
cli_runner = CliRunner()
result = cli_runner.invoke(cli.cli, self.COMMAND)
assert result.output == self.EXPECTED_STDOUT_WITH_WRONG_API_TOKEN, result.exc_info
get_patched.assert_called_with(self.URL,
headers=EXPECTED_HEADERS,
json={"notebookId": "some_id"},
params=None)
assert result.exit_code == 0
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_print_valid_error_message_when_no_content_was_received_in_response(self, get_patched):
get_patched.return_value = MockResponse(status_code=400)
cli_runner = CliRunner()
result = cli_runner.invoke(cli.cli, self.COMMAND)
assert result.output == "Failed to fetch data\n", result.exc_info
get_patched.assert_called_with(self.URL,
headers=EXPECTED_HEADERS,
json={"notebookId": "some_id"},
params=None)
assert result.exit_code == 0
class TestNotebooksList(object):
URL = "https://api.paperspace.io/notebooks/getNotebooks"
COMMAND = ["notebooks", "list"]
COMMAND_WITH_FILTERING_BY_TAGS = [
"notebooks", "list",
"--tag", "tag1",
"--tag", "tag2",
]
EXPECTED_STDOUT = """+--------------------+----------+
| Name | ID |
+--------------------+----------+
| job 1 | n1vmfj6x |
| job 1 | nhdf8zf3 |
| My Notebook 123 | nslk5r03 |
| My Notebook 123 | ng9a3tp4 |
| some_name | ngw7piq9 |
| some_notebook_name | n8h0d5lf |
| some_notebook_name | nl0b6cn0 |
| some_notebook_name | njmq1zju |
| some_notebook_name | nfcuwqu5 |
+--------------------+----------+
"""
RESPONSE_JSON = example_responses.NOTEBOOKS_LIST_RESPONSE_JSON
COMMAND_WITH_API_KEY_USED = ["notebooks", "list", "--apiKey", "some_key"]
COMMAND_WITH_OPTIONS_FILE_USED = ["notebooks", "list", "--optionsFile", ] # path added in test
EXPECTED_FILTERS = {
"filter": {
"where": {
"dtDeleted": None,
},
"limit": 20,
"order": "jobId desc",
"offset": 0,
},
}
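# The assertions below expect this dict to arrive JSON-encoded in the "filter"
# query parameter (hence the json.loads(params["filter"]) round-trip).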
RESPONSE_JSON_WITH_WRONG_API_TOKEN = {"status": 400, "message": "Invalid API token"}
EXPECTED_STDOUT_WITH_WRONG_API_TOKEN = "Failed to fetch data: Invalid API token\n"
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_send_post_request_and_print_notebook_details(self, post_patched):
post_patched.return_value = MockResponse(self.RESPONSE_JSON)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND)
assert result.output == self.EXPECTED_STDOUT, result.exc_info
post_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS,
json=None,
params=mock.ANY)
params = post_patched.call_args.kwargs["params"]
filter_params = params["filter"]
filter_params = json.loads(filter_params)
assert filter_params == self.EXPECTED_FILTERS
assert "tagFilter[0]" not in params
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_send_post_request_and_print_notebook_details_when_filtering_by_tags(self, post_patched):
post_patched.return_value = MockResponse(self.RESPONSE_JSON)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND_WITH_FILTERING_BY_TAGS)
assert result.output == self.EXPECTED_STDOUT, result.exc_info
post_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS,
json=None,
params=mock.ANY)
params = post_patched.call_args.kwargs["params"]
filter_params = params["filter"]
filter_params = json.loads(filter_params)
assert filter_params == self.EXPECTED_FILTERS
assert "tagFilter[0]" in params
assert params["tagFilter[0]"] in ("tag1", "tag2")
assert params["tagFilter[1]"] in ("tag1", "tag2")
assert params["tagFilter[0]"] != params["tagFilter[1]"]
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_send_changed_headers_when_api_key_option_was_used(self, get_patched):
get_patched.return_value = MockResponse(self.RESPONSE_JSON)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND_WITH_API_KEY_USED)
assert result.output == self.EXPECTED_STDOUT, result.exc_info
get_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
json=None,
params=mock.ANY)
params = get_patched.call_args.kwargs["params"]
filter_params = params["filter"]
filter_params = json.loads(filter_params)
assert filter_params == self.EXPECTED_FILTERS
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_read_option_from_yaml_file(self, get_patched, notebooks_list_config_path):
get_patched.return_value = MockResponse(self.RESPONSE_JSON)
command = self.COMMAND_WITH_OPTIONS_FILE_USED[:] + [notebooks_list_config_path]
runner = CliRunner()
result = runner.invoke(cli.cli, command)
assert result.output == self.EXPECTED_STDOUT, result.exc_info
get_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
json=None,
params=mock.ANY)
params = get_patched.call_args.kwargs["params"]
filter_params = params["filter"]
filter_params = json.loads(filter_params)
assert filter_params == self.EXPECTED_FILTERS
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_print_valid_error_message_when_command_was_used_with_invalid_api_token(self, get_patched):
get_patched.return_value = MockResponse(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400)
cli_runner = CliRunner()
result = cli_runner.invoke(cli.cli, self.COMMAND)
assert result.output == self.EXPECTED_STDOUT_WITH_WRONG_API_TOKEN, result.exc_info
get_patched.assert_called_with(self.URL,
headers=EXPECTED_HEADERS,
json=None,
params=mock.ANY)
params = get_patched.call_args.kwargs["params"]
filter_params = params["filter"]
filter_params = json.loads(filter_params)
assert filter_params == self.EXPECTED_FILTERS
assert result.exit_code == 0
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_print_valid_error_message_when_no_content_was_received_in_response(self, get_patched):
get_patched.return_value = MockResponse(status_code=400)
cli_runner = CliRunner()
result = cli_runner.invoke(cli.cli, self.COMMAND)
assert result.output == "Failed to fetch data\n", result.exc_info
get_patched.assert_called_with(self.URL,
headers=EXPECTED_HEADERS,
json=None,
params=mock.ANY)
params = get_patched.call_args.kwargs["params"]
filter_params = params["filter"]
filter_params = json.loads(filter_params)
assert filter_params == self.EXPECTED_FILTERS
assert result.exit_code == 0
class TestNotebooksMetricsGetCommand(object):
GET_NOTEBOOK_URL = "https://api.paperspace.io/notebooks/getNotebook"
GET_METRICS_URL = "https://aws-testing.paperspace.io/metrics/api/v1/range"
BASIC_OPTIONS_COMMAND = [
"notebooks", "metrics", "get",
"--id", "ngw7piq9",
]
ALL_OPTIONS_COMMAND = [
"notebooks", "metrics", "get",
"--id", "ngw7piq9",
"--metric", "gpuMemoryFree",
"--metric", "gpuMemoryUsed",
"--interval", "20s",
"--start", "2020-04-01",
"--end", "2020-04-02 21:37:00",
"--apiKey", "some_key",
]
FULL_OPTIONS_COMMAND_WITH_OPTIONS_FILE = [
"notebooks", "metrics", "get",
"--optionsFile", # path added in test,
]
GET_NOTEBOOK_REQUEST_JSON = {"notebookId": "ngw7piq9"}
BASIC_COMMAND_GET_METRICS_REQUEST_PARAMS = {
"start": "2019-09-03T11:10:36Z",
"handle": "ngw7piq9",
"interval": "30s",
"charts": "cpuPercentage,memoryUsage",
"objecttype": "notebook",
}
ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS = {
"start": "2020-04-01T00:00:00Z",
"handle": "ngw7piq9",
"interval": "20s",
"charts": "gpuMemoryFree,gpuMemoryUsed",
"objecttype": "notebook",
"end": "2020-04-02T21:37:00Z",
}
GET_NOTEBOOK_RESPONSE_JSON = example_responses.NOTEBOOK_GET_RESPONSE
GET_METRICS_RESPONSE_JSON = example_responses.NOTEBOOKS_METRICS_GET_RESPONSE
EXPECTED_STDOUT = """{
"cpuPercentage": {
"npmnnm6e": [
{
"time_stamp": 1587993000,
"value": "0"
},
{
"time_stamp": 1587993030,
"value": "0"
},
{
"time_stamp": 1587993060,
"value": "0"
},
{
"time_stamp": 1587993090,
"value": "0"
},
{
"time_stamp": 1587993120,
"value": "0"
},
{
"time_stamp": 1587993150,
"value": "0"
},
{
"time_stamp": 1587993180,
"value": "0"
},
{
"time_stamp": 1587993210,
"value": "0"
},
{
"time_stamp": 1587993240,
"value": "0"
},
{
"time_stamp": 1587993270,
"value": "0"
},
{
"time_stamp": 1587993300,
"value": "0"
},
{
"time_stamp": 1587993330,
"value": "0"
},
{
"time_stamp": 1587993360,
"value": "0"
}
]
},
"memoryUsage": {
"npmnnm6e": [
{
"time_stamp": 1587992970,
"value": "0"
},
{
"time_stamp": 1587993000,
"value": "782336"
},
{
"time_stamp": 1587993030,
"value": "782336"
},
{
"time_stamp": 1587993060,
"value": "782336"
},
{
"time_stamp": 1587993090,
"value": "782336"
},
{
"time_stamp": 1587993120,
"value": "782336"
},
{
"time_stamp": 1587993150,
"value": "782336"
},
{
"time_stamp": 1587993180,
"value": "782336"
},
{
"time_stamp": 1587993210,
"value": "782336"
},
{
"time_stamp": 1587993240,
"value": "782336"
},
{
"time_stamp": 1587993270,
"value": "782336"
},
{
"time_stamp": 1587993300,
"value": "782336"
},
{
"time_stamp": 1587993330,
"value": "782336"
},
{
"time_stamp": 1587993360,
"value": "782336"
}
]
}
}
"""
EXPECTED_STDOUT_WHEN_INVALID_API_KEY_WAS_USED = "Failed to fetch data: Invalid API token\n"
EXPECTED_STDOUT_WHEN_EXPERIMENT_WAS_NOT_FOUND = "Failed to fetch data: Not found. " \
"Please contact [email protected] for help.\n"
EXPECTED_STDOUT_WHEN_NO_METRICS_WERE_FOUND = """{
"cpuPercentage": null,
"memoryUsage": null
}
"""
EXPECTED_STDOUT_WHEN_ERROR_CODE_WAS_RETURNED_WITHOUT_ERROR_MESSAGE = "Failed to fetch data\n"
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_read_all_available_metrics_when_metrics_get_command_was_used_with_basic_options(self, get_patched):
get_patched.side_effect = [
MockResponse(self.GET_NOTEBOOK_RESPONSE_JSON),
MockResponse(self.GET_METRICS_RESPONSE_JSON),
]
runner = CliRunner()
result = runner.invoke(cli.cli, self.BASIC_OPTIONS_COMMAND)
assert json.loads(result.output.strip()) == json.loads(self.EXPECTED_STDOUT.strip()), \
str(result.output) + str(result.exc_info)
get_patched.assert_has_calls(
[
mock.call(
self.GET_NOTEBOOK_URL,
json=self.GET_NOTEBOOK_REQUEST_JSON,
params=None,
headers=EXPECTED_HEADERS,
),
mock.call(
self.GET_METRICS_URL,
json=None,
params=self.BASIC_COMMAND_GET_METRICS_REQUEST_PARAMS,
headers=EXPECTED_HEADERS,
),
]
)
assert result.exit_code == 0, result.exc_info
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_read_metrics_when_metrics_get_command_was_used_with_all_options(self, get_patched):
get_patched.side_effect = [
MockResponse(self.GET_NOTEBOOK_RESPONSE_JSON),
MockResponse(self.GET_METRICS_RESPONSE_JSON),
]
runner = CliRunner()
result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)
# comparing objects instead of strings because Py2 and Py3 produce slightly different outputs
assert json.loads(result.output.strip()) == json.loads(self.EXPECTED_STDOUT.strip()), result.exc_info
get_patched.assert_has_calls(
[
mock.call(
self.GET_NOTEBOOK_URL,
json=self.GET_NOTEBOOK_REQUEST_JSON,
params=None,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
),
mock.call(
self.GET_METRICS_URL,
json=None,
params=self.ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
),
]
)
assert result.exit_code == 0, result.exc_info
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_read_metrics_when_metrics_get_was_executed_and_options_file_was_used(
self, get_patched, notebooks_metrics_get_config_path):
get_patched.side_effect = [
MockResponse(self.GET_NOTEBOOK_RESPONSE_JSON),
MockResponse(self.GET_METRICS_RESPONSE_JSON),
]
command = self.FULL_OPTIONS_COMMAND_WITH_OPTIONS_FILE[:] + [notebooks_metrics_get_config_path]
runner = CliRunner()
result = runner.invoke(cli.cli, command)
# comparing objects instead of strings because Py2 and Py3 produce slightly different outputs
assert json.loads(result.output.strip()) == json.loads(self.EXPECTED_STDOUT.strip()), result.exc_info
get_patched.assert_has_calls(
[
mock.call(
self.GET_NOTEBOOK_URL,
json=self.GET_NOTEBOOK_REQUEST_JSON,
params=None,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
),
mock.call(
self.GET_METRICS_URL,
json=None,
params=self.ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
),
]
)
assert result.exit_code == 0, result.exc_info
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_print_valid_error_message_when_invalid_api_key_was_used(self, get_patched):
get_patched.return_value = MockResponse({"status": 400, "message": "Invalid API token"},
status_code=403)
runner = CliRunner()
result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)
assert result.output == self.EXPECTED_STDOUT_WHEN_INVALID_API_KEY_WAS_USED, result.exc_info
get_patched.assert_called_once_with(
self.GET_NOTEBOOK_URL,
json=self.GET_NOTEBOOK_REQUEST_JSON,
params=None,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
)
assert result.exit_code == 0, result.exc_info
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_print_valid_error_message_when_deployment_was_not_found(self, get_patched):
get_patched.side_effect = [
MockResponse({"error": {"name": "ApplicationError", "status": 404,
"message": "Not found. Please contact [email protected] for help."}},
status_code=404),
]
runner = CliRunner()
result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)
assert result.output == self.EXPECTED_STDOUT_WHEN_EXPERIMENT_WAS_NOT_FOUND, result.exc_info
get_patched.assert_has_calls(
[
mock.call(
self.GET_NOTEBOOK_URL,
json=self.GET_NOTEBOOK_REQUEST_JSON,
params=None,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
),
]
)
assert result.exit_code == 0, result.exc_info
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_print_valid_message_when_no_metrics_were_returned(self, get_patched):
get_patched.side_effect = [
MockResponse(self.GET_NOTEBOOK_RESPONSE_JSON),
MockResponse(example_responses.NOTEBOOKS_METRICS_GET_RESPONSE_WHEN_NO_METRICS_WERE_FOUND),
]
runner = CliRunner()
result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)
assert json.loads(result.output.strip()) == json.loads(self.EXPECTED_STDOUT_WHEN_NO_METRICS_WERE_FOUND.strip()), \
str(result.output) + str(result.exc_info)
get_patched.assert_has_calls(
[
mock.call(
self.GET_NOTEBOOK_URL,
json=self.GET_NOTEBOOK_REQUEST_JSON,
params=None,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
),
mock.call(
self.GET_METRICS_URL,
json=None,
params=self.ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
),
]
)
assert result.exit_code == 0, result.exc_info
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_print_valid_error_message_when_error_code_was_returned_without_error_message(self, get_patched):
get_patched.side_effect = [
MockResponse(self.GET_NOTEBOOK_RESPONSE_JSON),
MockResponse(status_code=500),
]
runner = CliRunner()
result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)
assert result.output == self.EXPECTED_STDOUT_WHEN_ERROR_CODE_WAS_RETURNED_WITHOUT_ERROR_MESSAGE, result.exc_info
get_patched.assert_has_calls(
[
mock.call(
self.GET_NOTEBOOK_URL,
json=self.GET_NOTEBOOK_REQUEST_JSON,
params=None,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
),
mock.call(
self.GET_METRICS_URL,
json=None,
params=self.ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
),
]
)
assert result.exit_code == 0, result.exc_info
class TestNotebooksMetricsStreamCommand(object):
GET_NOTEBOOK_URL = "https://api.paperspace.io/notebooks/getNotebook"
GET_METRICS_URL = "https://aws-testing.paperspace.io/metrics/api/v1/stream"
BASIC_OPTIONS_COMMAND = [
"notebooks", "metrics", "stream",
"--id", "ngw7piq9",
]
ALL_OPTIONS_COMMAND = [
"notebooks", "metrics", "stream",
"--id", "ngw7piq9",
"--metric", "gpuMemoryFree",
"--metric", "gpuMemoryUsed",
"--interval", "20s",
"--apiKey", "some_key",
]
ALL_OPTIONS_COMMAND_WITH_OPTIONS_FILE = [
"notebooks", "metrics", "stream",
"--optionsFile", # path added in test,
]
GET_NOTEBOOK_REQUEST_JSON = {"notebookId": "ngw7piq9"}
BASIC_COMMAND_CHART_DESCRIPTOR = '{"chart_names": ["cpuPercentage", "memoryUsage"], "handles": ["ngw7piq9"' \
'], "object_type": "notebook", "poll_interval": "30s"}'
ALL_COMMANDS_CHART_DESCRIPTOR = '{"chart_names": ["gpuMemoryFree", "gpuMemoryUsed"], "handles": ["ngw7piq9' \
'"], "object_type": "notebook", "poll_interval": "20s"}'
GET_NOTEBOOK_RESPONSE_JSON = example_responses.NOTEBOOK_GET_RESPONSE
GET_NOTEBOOK_RESPONSE_JSON_WHEN_NOTEBOOK_NOT_FOUND = {
"error": {
"name": "ApplicationError",
"status": 404,
"message": "Not found. Please contact [email protected] for help.",
},
}
EXPECTED_TABLE_1 = """+----------+---------------+-------------+
| Pod | cpuPercentage | memoryUsage |
+----------+---------------+-------------+
| nrwed38p | | 54013952 |
+----------+---------------+-------------+
"""
EXPECTED_TABLE_2 = """+----------+----------------------+-------------+
| Pod | cpuPercentage | memoryUsage |
+----------+----------------------+-------------+
| nrwed38p | 0.006907773333334353 | 54013952 |
+----------+----------------------+-------------+
"""
EXPECTED_TABLE_3 = """+----------+----------------------+-------------+
| Pod | cpuPercentage | memoryUsage |
+----------+----------------------+-------------+
| nrwed38p | 0.006907773333334353 | 12345667 |
+----------+----------------------+-------------+
"""
ALL_OPTIONS_EXPECTED_TABLE_1 = """+----------+---------------+---------------+
| Pod | gpuMemoryFree | gpuMemoryUsed |
+----------+---------------+---------------+
| nrwed38p | 1234 | |
+----------+---------------+---------------+
"""
ALL_OPTIONS_EXPECTED_TABLE_2 = """+----------+---------------+---------------+
| Pod | gpuMemoryFree | gpuMemoryUsed |
+----------+---------------+---------------+
| nrwed38p | 1234 | |
+----------+---------------+---------------+
"""
ALL_OPTIONS_EXPECTED_TABLE_3 = """+----------+---------------+---------------+
| Pod | gpuMemoryFree | gpuMemoryUsed |
+----------+---------------+---------------+
| nrwed38p | 2345 | 32 |
+----------+---------------+---------------+
"""
EXPECTED_STDOUT_WHEN_INVALID_API_KEY_WAS_USED = "Failed to fetch data: Incorrect API Key provided\nForbidden\n"
EXPECTED_STDOUT_WHEN_DEPLOYMENT_WAS_NOT_FOUND = "Failed to fetch data: Not found. Please contact " \
"[email protected] for help.\n"
@mock.patch("gradient.api_sdk.repositories.common.websocket.create_connection")
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_read_all_available_metrics_when_metrics_get_command_was_used_with_basic_options(
self, get_patched, create_ws_connection_patched,
basic_options_metrics_stream_websocket_connection_iterator):
get_patched.return_value = MockResponse(self.GET_NOTEBOOK_RESPONSE_JSON)
ws_connection_instance_mock = mock.MagicMock()
ws_connection_instance_mock.__iter__ = basic_options_metrics_stream_websocket_connection_iterator
create_ws_connection_patched.return_value = ws_connection_instance_mock
runner = CliRunner()
result = runner.invoke(cli.cli, self.BASIC_OPTIONS_COMMAND)
assert self.EXPECTED_TABLE_1 in result.output, result.exc_info
assert self.EXPECTED_TABLE_2 in result.output, result.exc_info
assert self.EXPECTED_TABLE_3 in result.output, result.exc_info
get_patched.assert_called_once_with(
self.GET_NOTEBOOK_URL,
json=self.GET_NOTEBOOK_REQUEST_JSON,
params=None,
headers=EXPECTED_HEADERS,
)
ws_connection_instance_mock.send.assert_called_once_with(self.BASIC_COMMAND_CHART_DESCRIPTOR)
assert result.exit_code == 0, result.exc_info
@mock.patch("gradient.api_sdk.repositories.common.websocket.create_connection")
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_read_metrics_when_metrics_get_command_was_used_with_all_options(
self, get_patched, create_ws_connection_patched,
all_options_metrics_stream_websocket_connection_iterator):
get_patched.return_value = MockResponse(self.GET_NOTEBOOK_RESPONSE_JSON)
ws_connection_instance_mock = mock.MagicMock()
ws_connection_instance_mock.__iter__ = all_options_metrics_stream_websocket_connection_iterator
create_ws_connection_patched.return_value = ws_connection_instance_mock
runner = CliRunner()
result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)
assert self.ALL_OPTIONS_EXPECTED_TABLE_1 in result.output, result.exc_info
assert self.ALL_OPTIONS_EXPECTED_TABLE_2 in result.output, result.exc_info
assert self.ALL_OPTIONS_EXPECTED_TABLE_3 in result.output, result.exc_info
get_patched.assert_called_once_with(
self.GET_NOTEBOOK_URL,
json=self.GET_NOTEBOOK_REQUEST_JSON,
params=None,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
)
ws_connection_instance_mock.send.assert_called_once_with(self.ALL_COMMANDS_CHART_DESCRIPTOR)
assert result.exit_code == 0, result.exc_info
@mock.patch("gradient.api_sdk.repositories.common.websocket.create_connection")
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_read_metrics_when_metrics_get_was_executed_and_options_file_was_used(
self, get_patched, create_ws_connection_patched,
all_options_metrics_stream_websocket_connection_iterator,
notebooks_metrics_stream_config_path):
get_patched.return_value = MockResponse(self.GET_NOTEBOOK_RESPONSE_JSON)
ws_connection_instance_mock = mock.MagicMock()
ws_connection_instance_mock.__iter__ = all_options_metrics_stream_websocket_connection_iterator
create_ws_connection_patched.return_value = ws_connection_instance_mock
command = self.ALL_OPTIONS_COMMAND_WITH_OPTIONS_FILE[:] + [notebooks_metrics_stream_config_path]
runner = CliRunner()
result = runner.invoke(cli.cli, command)
assert self.ALL_OPTIONS_EXPECTED_TABLE_1 in result.output, result.exc_info
assert self.ALL_OPTIONS_EXPECTED_TABLE_2 in result.output, result.exc_info
assert self.ALL_OPTIONS_EXPECTED_TABLE_3 in result.output, result.exc_info
get_patched.assert_called_once_with(
self.GET_NOTEBOOK_URL,
json=self.GET_NOTEBOOK_REQUEST_JSON,
params=None,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
)
ws_connection_instance_mock.send.assert_called_once_with(self.ALL_COMMANDS_CHART_DESCRIPTOR)
assert result.exit_code == 0, result.exc_info
@mock.patch("gradient.api_sdk.repositories.common.websocket.create_connection")
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_print_valid_error_message_when_invalid_api_key_was_used(
self, get_patched, create_ws_connection_patched):
get_patched.return_value = MockResponse({"status": 400, "message": "Invalid API token"}, 400)
runner = CliRunner()
result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)
assert "Failed to fetch data: Invalid API token\n" == result.output, result.exc_info
get_patched.assert_called_once_with(
self.GET_NOTEBOOK_URL,
json=self.GET_NOTEBOOK_REQUEST_JSON,
params=None,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
)
create_ws_connection_patched.assert_not_called()
assert result.exit_code == 0, result.exc_info
@mock.patch("gradient.api_sdk.repositories.common.websocket.create_connection")
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_print_valid_error_message_when_deployment_was_not_found(
self, get_patched, create_ws_connection_patched):
get_patched.return_value = MockResponse(self.GET_NOTEBOOK_RESPONSE_JSON_WHEN_NOTEBOOK_NOT_FOUND, 404)
runner = CliRunner()
result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)
assert result.output == self.EXPECTED_STDOUT_WHEN_DEPLOYMENT_WAS_NOT_FOUND, result.exc_info
get_patched.assert_called_once_with(
self.GET_NOTEBOOK_URL,
json=self.GET_NOTEBOOK_REQUEST_JSON,
params=None,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
)
create_ws_connection_patched.assert_not_called()
assert result.exit_code == 0, result.exc_info
|
from agility.main import Servo, Leg, Robot, Head, Body
from finesse.eclipse import Finesse
from theia.eye import Camera
################
# Define robot.
################
class Android:
camera = Camera(0, 90, 60)
# Leg 1.
servo1 = Servo(0, -180, 90, 500, 2500, 150, bias=-10, direction=1)
servo2 = Servo(1, -45, 225, 500, 2500, 150, bias=2, direction=1)
servo3 = Servo(2, -135, 135, 500, 2500, 150, bias=5, direction=-1)
leg1 = Leg(servo1, servo2, servo3, (6.3, 7.13), 0, Finesse.inverse_pack, Finesse.forward_pack)
# Leg 2.
servo4 = Servo(3, -90, 180, 500, 2500, 150, bias=5, direction=-1)
servo5 = Servo(4, -225, 45, 500, 2500, 150, bias=-5, direction=1)
servo6 = Servo(5, -135, 135, 500, 2500, 150, bias=5, direction=1)
leg2 = Leg(servo4, servo5, servo6, (6.3, 7.13), 1, Finesse.inverse_pack, Finesse.forward_pack)
# Leg 3.
servo7 = Servo(6, -90, 180, 500, 2500, 150, bias=-5, direction=1)
servo8 = Servo(7, -45, 225, 500, 2500, 150, bias=-2.5, direction=1)
servo9 = Servo(8, -135, 135, 500, 2500, 150, bias=5, direction=-1)
leg3 = Leg(servo7, servo8, servo9, (6.3, 7.13), 2, Finesse.inverse_pack, Finesse.forward_pack)
# Leg 4.
servo10 = Servo(9, -180, 90, 500, 2500, 150, bias=-8, direction=-1)
servo11 = Servo(10, -225, 45, 500, 2500, 150, bias=5, direction=1)
servo12 = Servo(11, -135, 135, 500, 2500, 150, bias=0, direction=1)
leg4 = Leg(servo10, servo11, servo12, (6.3, 7.13), 3, Finesse.inverse_pack, Finesse.forward_pack)
# Head (emulated)
servo16 = Servo(16, -90, 90, 400, 2400, 100, bias=0, direction=1, left_bound=-45, right_bound=45)
servo17 = Servo(17, -90, 90, 400, 2400, 100, bias=0, direction=1, left_bound=-20, right_bound=20)
head = Head(servo16, servo17, camera)
# Body
body = Body(length=15.6, width=14.1, cx=0.4, cy=0, mb=20, ml=0)
# Robot.
robot = Robot(leg1, leg2, leg3, leg4, body, head)
#######################
# Connection variables.
#######################
class Crossbar:
# Crossbar.
ip = '192.168.43.245'
realm = 'lycanthrope'
authid = 'DOG-4S1'
secret = 'A@Q+xfQ[9<<5,+YG'
prefix = 'pack2'
|
from django.db import models
from jsonfield import JSONField
class TimeStampedModel(models.Model):
"""
An abstract base class model that provides self-updating
``created`` and ``modified`` fields.
"""
created = models.DateField(auto_now_add=True)
modified = models.DateField(auto_now=True)
class Meta:
abstract = True
class DeblurData(TimeStampedModel):
deblur_data_text = models.TextField(blank=True)
image = models.ImageField(
upload_to="uploads/", blank=True, null=True, max_length=256
)
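# Usage sketch (illustrative only, not part of the app code):
#   DeblurData.objects.create(deblur_data_text="example")
# `created` and `modified` are filled automatically by auto_now_add/auto_now.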
|
# Generated by Django 3.0 on 2020-10-13 16:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Vendor', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Products', '0003_comment'),
]
operations = [
migrations.CreateModel(
name='NewCategoryRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('categoryName', models.CharField(max_length=255)),
('describe', models.TextField(blank=True, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('vendor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Vendor.Vendor')),
],
options={
'abstract': False,
},
),
]
|
# -*- coding: utf-8 -*-
import subprocess
import os
from .sqlite_resource import SqliteResource
class Parser(object):
# path to data file dumped by demoinfogo
dat_file = os.path.dirname(os.path.realpath(__file__)) + "/dat/demo.dat"
# path to demoinfogo executable
path_to_demoinfogo = os.path.dirname(os.path.realpath(__file__)) + "/lib/demoinfogo"
# parsing keys
match_start = "round_announce_match_start"
player_info = "player info"
player_spawn = "player_spawn"
player_team = "player_team"
adding_player = "adding:player info:"
weapon_fire = "weapon_fire"
player_jump = "player_jump"
player_death = "player_death"
round_mvp = "round_mvp"
bomb_defused = "bomb_defused"
bomb_planted = "bomb_planted"
kills = "kills"
assists = "assists"
deaths = "deaths"
team_id = "team_id"
headshots = "headshots"
xuid = "xuid"
name = "name"
knife = "knife"
# grenade weapons
smoke = "smokegrenade"
flash = "flashbang"
hegrenade = "hegrenade"
molotov = "molotov"
incgrenade = "incgrenade"
decoy = "decoy"
fire = "firegrenade"
grenades = [
smoke,
flash,
hegrenade,
molotov,
incgrenade,
decoy
]
def __init__(self, demo_file):
self.demo_file = demo_file
self.command = str(self.path_to_demoinfogo) + ' -gameevents -nofootsteps -stringtables ' + str(self.demo_file)
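# Note: the command string is executed through a shell (shell=True below), so a
# demo path containing spaces or shell metacharacters would need quoting.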
self._call_demoinfogo()
self._parse_playerdata()
#print(self.match_data)
data_res = SqliteResource()
self.match_id = data_res.addMatchData(self.match_data)
data_res.addPlayerData(self.player_data)
#print(self.player_data)
def get_match_id(self):
return self.match_id
def _call_demoinfogo(self):
''' Calls demoinfogo with the demo file provided in the constructor '''
with open(self.dat_file, "w+") as f:
p = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE, stdout=f)
print("Dumping demo to data file. Please wait.")
return_code = p.wait()
print("Dumping process finished.")
f.flush()
def _parse_playerdata(self):
''' Parses the data file dumped by demoinfogo.
Adds the data to a dictionary '''
self.player_data = {}
# match data used basically for the map name
# should think about adding rounds etc
self.match_data = {}
warmup_skipped = False
with open(self.dat_file, "r") as f:
lines = f.readlines()
# use indexes for handier parsing
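# note: the `i = i + N` assignments below do not skip lines, because the for
# loop re-binds i on each iteration; every line is still examined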
for i in range(0, len(lines)):
line = lines[i]
if warmup_skipped is True:
# find weapon fire events - TODO don't include grenades or knives
if line.find(self.weapon_fire) > -1:
#user_id = self._get_user(lines[i+2])
#self._init_userdata(user_id)
weapon = self._get_weapon(lines[i+3])
if weapon in self.grenades:
if weapon == self.molotov or weapon == self.incgrenade:
#self.player_data[user_id][self.fire] += 1
self.increment_stats(lines[i+2], self.fire)
else:
#self.player_data[user_id][weapon] += 1
self.increment_stats(lines[i+2], weapon)
else:
#self.player_data[user_id][self.weapon_fire] += 1
self.increment_stats(lines[i+2], self.weapon_fire)
                        # skip next five indexes
                        # NOTE: reassigning `i` here has no effect: the next pass of the
                        # `for i in range(...)` loop overwrites it, so no lines are skipped.
                        i = i + 5
# find player jump events
if line.find(self.player_jump) > -1:
#user_id = self._get_user(lines[i+2])
#self._init_userdata(user_id)
#self.player_data[user_id][self.player_jump] += 1
self.increment_stats(lines[i+2], self.player_jump)
i = i + 3
# find player death events
if line.find(self.player_death) > -1:
user_id = self._get_user(lines[i+2])
attacker = self._get_user(lines[i+3])
assister = self._get_assister(lines[i+4])
                        if assister != "0":
#self.player_data[assister][self.assists] += 1
self.increment_stats(lines[i+4], self.assists)
# is death event a headshot
hs = int(lines[i+9].split()[1])
if hs == 1:
#self.player_data[attacker][self.headshots] += 1
self.increment_stats(lines[i+3], self.headshots)
#self.player_data[user_id][self.deaths] += 1
self.increment_stats(lines[i+2], self.deaths)
#self.player_data[attacker][self.kills] += 1
self.increment_stats(lines[i+3], self.kills)
i = i + 13
if line.find(self.round_mvp) > -1:
#user_id = self._get_user(lines[i+2])
#self._init_userdata(user_id)
#self.player_data[user_id][self.round_mvp] += 1
self.increment_stats(lines[i+2], self.round_mvp)
if line.find(self.bomb_planted) > -1:
#user_id = self._get_user(lines[i+2])
#self._init_userdata(user_id)
#self.player_data[user_id][self.bomb_planted] += 1
self.increment_stats(lines[i+2], self.bomb_planted)
if line.find(self.bomb_defused) > -1:
#user_id = self._get_user(lines[i+2])
#self._init_userdata(user_id)
                        # NOTE: the direct increment below used a stale user_id and double
                        # counted defuses; increment_stats() handles it correctly.
                        #self.player_data[user_id][self.bomb_defused] += 1
self.increment_stats(lines[i+2], self.bomb_defused)
if line.find(self.player_team) > -1:
is_bot = int(lines[i+8].split()[1])
if is_bot == 0:
user_id = lines[i+2].split()[1].rstrip()
if not self.is_int(user_id):
#user_id = self._get_user(lines[i+2])
teamnum = int(lines[i+3].split()[1])
#self.player_data[user_id][self.team_id] = teamnum
self.set_stat(lines[i+2], self.team_id, teamnum)
else:
if line.find("0, maps/") > -1:
bsp = line.split()[1]
self.match_data['map'] = bsp
#if line.find(self.player_info) == 0:
# is_fake = int(lines[i+9].split(":")[1].rstrip())
# is_hltv = int(lines[i+10].split(":")[1].rstrip())
# user_id = lines[i+4].split(":")[1].rstrip()
# if user_id not in self.player_data and is_fake is 0 and is_hltv is 0:
# self._init_userdata(user_id)
# if is_fake is 0 and is_hltv is 0:
# xuid = lines[i+3].split(":")[1].rstrip()
# self.player_data[user_id][self.xuid] = xuid
if line.find(self.adding_player) > -1:
is_fake = int(lines[i+7].split(":")[1].rstrip())
if is_fake == 0:
user_id = int(lines[i+3].split(":")[1].rstrip())
self._init_userdata(user_id)
xuid = lines[i+1].split(":")[1].rstrip()
name = lines[i+2].split(":")[1].rstrip()
self.player_data[user_id][self.xuid] = xuid
self.player_data[user_id][self.name] = name
if line.find(self.match_start) > -1:
warmup_skipped = True
i = i + 2
def _get_user(self, line):
''' Finds a users name from the provided line '''
user_id = int(line.rstrip().strip(")").split("(")[1].split(":")[1])
return user_id
def _get_name(self, line):
name = line.split(" ")[2:-1]
return ' '.join(name)
def _get_assister(self, line):
''' Find assister '''
assister = line.rstrip().strip(" ").split(" ")[1]
if assister == '0':
return assister
else:
return int(line.rstrip().strip(" ").split(" ")[-1].strip("(").strip(")").split(":")[1])
def _get_weapon(self, line):
''' Finds weapon from the provided line '''
return line.split()[1]
def _init_userdata(self, user_id):
''' Initializes user data in the parse-dictionary '''
if user_id not in self.player_data:
self.player_data[user_id] = {
self.xuid: "",
self.weapon_fire: 0,
self.player_jump: 0,
self.kills: 0,
self.deaths: 0,
self.assists: 0,
self.bomb_planted: 0,
self.bomb_defused: 0,
self.team_id: 0,
self.headshots: 0,
self.round_mvp: 0,
self.smoke: 0,
self.flash: 0,
self.hegrenade: 0,
self.fire: 0,
self.decoy: 0,
self.name: ""
}
def _find_user_id_by_name(self, name):
for uid in self.player_data:
if self.player_data[uid][self.name] == name:
return uid
return None
def increment_stats(self, line, field):
user_id = self._get_user(line)
if user_id in self.player_data:
self.player_data[user_id][field] += 1
else:
user_id = self._find_user_id_by_name(self._get_name(line))
if user_id:
self.player_data[user_id][field] += 1
def set_stat(self, line, field, value):
user_id = self._get_user(line)
if user_id in self.player_data:
self.player_data[user_id][field] = value
else:
user_id = self._find_user_id_by_name(self._get_name(line))
if user_id:
self.player_data[user_id][field] = value
def is_int(self, s):
try:
int(s)
return True
except ValueError:
return False
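# --- Usage sketch (not part of the original module) ---------------------------
# Hedged example; because of the relative import above, the module has to be run
# as part of its package (e.g. `python -m <package>.parser`). "match.dem" is a
# hypothetical demo path, and constructing Parser has side effects: it runs
# demoinfogo and writes the parsed match to sqlite via SqliteResource.
if __name__ == "__main__":
    parser = Parser("match.dem")   # dumps and parses the demo on construction
    print("stored match with id", parser.get_match_id())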
|
import torch
from torch.autograd import Variable
import numpy as np
from torch import distributions as dis
from copy import deepcopy
import torch.nn as nn
EPS = 1e-6 # Avoid NaN (prevents division by zero or log of zero)
# CAP the standard deviation of the actor
LOG_STD_MAX = 2
LOG_STD_MIN = -20
REG = 1e-3 # regularization of the actor
class SAC(nn.Module):
def __init__(
self,
input_size,
action_size,
gamma=0.99,
reward_scale=1,
beta_h="auto_1.0",
policy_layers=[256, 256],
value_layers=[256, 256],
lr=3e-4,
act_fn=nn.ReLU,
):
super(SAC, self).__init__()
self.input_size = input_size
self.action_size = action_size
self.reward_scale = reward_scale
self.beta_h = beta_h
self.target_entropy = -np.float32(action_size)
if isinstance(self.beta_h, str) and self.beta_h.startswith("auto"):
# Default initial value of beta_h when learned
init_value = 1.0
if "_" in self.beta_h:
init_value = float(self.beta_h.split("_")[1])
assert (
init_value > 0.0
), "The initial value of beta_h must be greater than 0"
self.log_beta_h = torch.tensor(
np.log(init_value).astype(np.float32), requires_grad=True
)
else:
# Force conversion to float
# this will throw an error if a malformed string (different from 'auto')
# is passed
self.beta_h = float(self.beta_h)
self.gamma = gamma
self.policy_layers = policy_layers
self.value_layers = value_layers
# policy network
self.s2mua = nn.ModuleList()
last_layer_size = self.input_size
for layer_size in self.policy_layers:
self.s2mua.append(nn.Linear(last_layer_size, layer_size, bias=True))
last_layer_size = layer_size
self.s2mua.append(act_fn())
self.s2mua.append(nn.Linear(last_layer_size, self.action_size, bias=True))
self.f_s2mua = nn.Sequential(*self.s2mua)
self.s2log_siga = nn.ModuleList()
last_layer_size = self.input_size
for layer_size in self.policy_layers:
self.s2log_siga.append(nn.Linear(last_layer_size, layer_size, bias=True))
last_layer_size = layer_size
self.s2log_siga.append(act_fn())
self.s2log_siga.append(nn.Linear(last_layer_size, self.action_size, bias=True))
self.f_s2log_siga = nn.Sequential(*self.s2log_siga)
# V network
self.s2v = nn.ModuleList()
last_layer_size = self.input_size
for layer_size in self.value_layers:
self.s2v.append(nn.Linear(last_layer_size, layer_size, bias=True))
last_layer_size = layer_size
self.s2v.append(act_fn())
self.s2v.append(nn.Linear(last_layer_size, 1, bias=True))
self.f_s2v = nn.Sequential(*self.s2v)
# Q network 1
self.sa2q1 = nn.ModuleList()
last_layer_size = self.input_size + self.action_size
for layer_size in self.value_layers:
self.sa2q1.append(nn.Linear(last_layer_size, layer_size, bias=True))
last_layer_size = layer_size
self.sa2q1.append(act_fn())
self.sa2q1.append(nn.Linear(last_layer_size, 1, bias=True))
self.f_sa2q1 = nn.Sequential(*self.sa2q1)
# Q network 2
self.sa2q2 = nn.ModuleList()
last_layer_size = self.input_size + self.action_size
for layer_size in self.value_layers:
self.sa2q2.append(nn.Linear(last_layer_size, layer_size, bias=True))
last_layer_size = layer_size
self.sa2q2.append(act_fn())
self.sa2q2.append(nn.Linear(last_layer_size, 1, bias=True))
self.f_sa2q2 = nn.Sequential(*self.sa2q2)
# target V network
self.s2v_tar = nn.ModuleList()
last_layer_size = self.input_size
for layer_size in self.value_layers:
self.s2v_tar.append(nn.Linear(last_layer_size, layer_size, bias=True))
last_layer_size = layer_size
self.s2v_tar.append(act_fn())
self.s2v_tar.append(nn.Linear(last_layer_size, 1, bias=True))
self.f_s2v_tar = nn.Sequential(*self.s2v_tar)
# synchronizing target V network and V network
state_dict_tar = self.f_s2v_tar.state_dict()
state_dict = self.f_s2v.state_dict()
for key in list(self.f_s2v.state_dict().keys()):
state_dict_tar[key] = state_dict[key]
self.f_s2v_tar.load_state_dict(state_dict_tar)
self.optimizer_a = torch.optim.Adam(
[*self.f_s2mua.parameters(), *self.f_s2log_siga.parameters()], lr=lr
)
self.optimizer_v = torch.optim.Adam(
[
*self.f_s2v.parameters(),
*self.f_sa2q1.parameters(),
*self.f_sa2q2.parameters(),
],
lr=lr,
)
if isinstance(self.beta_h, str):
self.optimizer_e = torch.optim.Adam(
[self.log_beta_h], lr=lr
) # optimizer for beta_h
self.mse_loss = nn.MSELoss()
def get_v(self, S):
pass
def get_q(self, S, A):
pass
def sample_z(self, mu, sig):
# Using reparameterization trick to sample from a gaussian
eps = Variable(torch.randn_like(mu))
return mu + sig * eps
def select(self, S, action_return="normal"):
# output action
if isinstance(S, np.ndarray):
S = torch.from_numpy(S.astype(np.float32))
mua = self.f_s2mua(S)
siga = torch.exp(self.f_s2log_siga(S))
a = np.tanh(self.sample_z(mua, siga).cpu().detach()).numpy()
if action_return == "mean":
return np.tanh(mua.cpu().detach().numpy())
else:
return a
def sample_action(self, S, detach=False):
# output action
mua = self.f_s2mua(S)
siga = torch.exp(self.f_s2log_siga(S).clamp(LOG_STD_MIN, LOG_STD_MAX))
if detach:
return (
torch.tanh(self.sample_z(mua, siga)).cpu().detach(),
mua.cpu().detach(),
siga.cpu().detach(),
)
else:
return torch.tanh(self.sample_z(mua, siga)), mua, siga
def learn(self, S, SP, R, A, D, V, computation="explicit", grad_clip=False):
if isinstance(S, np.ndarray):
S = torch.from_numpy(S)
if isinstance(SP, np.ndarray):
SP = torch.from_numpy(SP)
if isinstance(R, np.ndarray):
R = torch.from_numpy(R)
if isinstance(A, np.ndarray):
A = torch.from_numpy(A)
if isinstance(D, np.ndarray):
D = torch.from_numpy(D)
if isinstance(V, np.ndarray):
V = torch.from_numpy(V)
gamma = self.gamma
reward_scale = self.reward_scale
if isinstance(self.beta_h, str):
beta_h = torch.exp(self.log_beta_h).data
else:
beta_h = np.float32(self.beta_h)
S = S.data # shape: batch_size x num_steps x n_neurons
mua_tensor = self.f_s2mua(S)
siga_tensor = torch.exp(self.f_s2log_siga(S).clamp(LOG_STD_MIN, LOG_STD_MAX))
v_tensor = self.f_s2v(S)
vp_tensor = self.f_s2v_tar(SP)
q_tensor_1 = self.f_sa2q1(torch.cat((S, A), dim=-1))
q_tensor_2 = self.f_sa2q2(torch.cat((S, A), dim=-1))
# ------ explicit computing---------------
if computation == "explicit":
# ------ loss_v ---------------
sampled_u = self.sample_z(mua_tensor.data, siga_tensor.data).data
sampled_a = torch.tanh(sampled_u)
sampled_q = torch.min(
self.f_sa2q1(torch.cat((S, sampled_a), dim=-1)).data,
self.f_sa2q2(torch.cat((S, sampled_a), dim=-1)).data,
)
q_exp = sampled_q
log_pi_exp = torch.sum(
-(mua_tensor.data - sampled_u.data).pow(2)
/ (siga_tensor.data.pow(2))
/ 2
- torch.log(siga_tensor.data * torch.tensor(2.5066)).clamp(
LOG_STD_MIN, LOG_STD_MAX
),
dim=-1,
keepdim=True,
)
log_pi_exp -= torch.sum(
torch.log(1.0 - sampled_a.pow(2) + EPS), dim=-1, keepdim=True
)
v_tar = (q_exp - beta_h * log_pi_exp.data).detach().data
loss_v = 0.5 * self.mse_loss(v_tensor * V, v_tar * V)
loss_q = 0.5 * self.mse_loss(
q_tensor_1 * V,
(reward_scale * R + (1 - D) * gamma * vp_tensor.detach().data) * V,
) + 0.5 * self.mse_loss(
q_tensor_2 * V,
(reward_scale * R + (1 - D) * gamma * vp_tensor.detach().data) * V,
)
loss_critic = loss_v + loss_q
# -------- loss_a ---------------
sampled_u = Variable(
self.sample_z(mua_tensor.data, siga_tensor.data), requires_grad=True
)
sampled_a = torch.tanh(sampled_u)
Q_tmp = torch.min(
self.f_sa2q1(torch.cat((S, torch.tanh(sampled_u)), dim=-1)),
self.f_sa2q2(torch.cat((S, torch.tanh(sampled_u)), dim=-1)),
)
Q_tmp.backward(torch.ones_like(Q_tmp))
PQPU = sampled_u.grad.data # \frac{\partial Q}{\partial a}
eps = (sampled_u.data - mua_tensor.data) / (
siga_tensor.data
) # action noise quantity
a = sampled_a.data # action quantity
grad_mua = (beta_h * (2 * a) - PQPU).data * V.repeat_interleave(
a.size()[-1], dim=-1
) + REG * mua_tensor * V.repeat_interleave(a.size()[-1], dim=-1)
grad_siga = (
-beta_h / (siga_tensor.data + EPS) + 2 * beta_h * a * eps - PQPU * eps
).data * V.repeat_interleave(
a.size()[-1], dim=-1
) + REG * siga_tensor * V.repeat_interleave(
a.size()[-1], dim=-1
)
self.optimizer_v.zero_grad()
loss_critic.backward()
if grad_clip:
nn.utils.clip_grad_norm_(
[
*self.f_s2v.parameters(),
*self.f_sa2q1.parameters(),
*self.f_sa2q2.parameters(),
],
1.0,
)
self.optimizer_v.step()
self.optimizer_a.zero_grad()
mua_tensor.backward(
grad_mua / torch.ones_like(mua_tensor, dtype=torch.float32).sum()
)
siga_tensor.backward(
grad_siga / torch.ones_like(siga_tensor, dtype=torch.float32).sum()
)
if grad_clip:
nn.utils.clip_grad_value_(
[*self.f_s2log_siga.parameters(), *self.f_s2mua.parameters()], 1.0
)
self.optimizer_a.step()
# Using Torch API for computing log_prob
# ------ implicit computing---------------
elif computation == "implicit":
# --------- loss_v ------------
mu_prob = dis.Normal(mua_tensor, siga_tensor)
sampled_u = mu_prob.sample()
sampled_a = torch.tanh(sampled_u)
log_pi_exp = (
torch.sum(
mu_prob.log_prob(sampled_u).clamp(LOG_STD_MIN, LOG_STD_MAX),
dim=-1,
keepdim=True,
)
- torch.sum(torch.log(1 - sampled_a.pow(2) + EPS), dim=-1, keepdim=True)
)
sampled_q = torch.min(
self.f_sa2q1(torch.cat((S, sampled_a), dim=-1)).data,
self.f_sa2q2(torch.cat((S, sampled_a), dim=-1)).data,
)
q_exp = sampled_q
v_tar = (q_exp - beta_h * log_pi_exp.data).detach().data
loss_v = 0.5 * self.mse_loss(v_tensor * V, v_tar * V)
loss_q = 0.5 * self.mse_loss(
q_tensor_1 * V,
(reward_scale * R + (1 - D) * gamma * vp_tensor.detach().data) * V,
) + 0.5 * self.mse_loss(
q_tensor_2 * V,
(reward_scale * R + (1 - D) * gamma * vp_tensor.detach().data) * V,
)
loss_critic = loss_v + loss_q
# ----------- loss_a ---------------
mu_prob = dis.Normal(mua_tensor, siga_tensor)
sampled_u = mu_prob.rsample()
sampled_a = torch.tanh(sampled_u)
log_pi_exp = (
torch.sum(
mu_prob.log_prob(sampled_u).clamp(LOG_STD_MIN, LOG_STD_MAX),
dim=-1,
keepdim=True,
)
- torch.sum(torch.log(1 - sampled_a.pow(2) + EPS), dim=-1, keepdim=True)
)
loss_a = torch.mean(
beta_h * log_pi_exp * V
- torch.min(
self.f_sa2q1(torch.cat((S, sampled_a), dim=-1)),
self.f_sa2q2(torch.cat((S, sampled_a), dim=-1)),
)
* V
) + REG / 2 * (
torch.mean(
(
siga_tensor
* V.repeat_interleave(siga_tensor.size()[-1], dim=-1)
).pow(2)
)
+ torch.mean(
(
mua_tensor * V.repeat_interleave(mua_tensor.size()[-1], dim=-1)
).pow(2)
)
)
self.optimizer_v.zero_grad()
loss_critic.backward()
if grad_clip:
nn.utils.clip_grad_norm_(
[
*self.f_s2v.parameters(),
*self.f_sa2q1.parameters(),
*self.f_sa2q2.parameters(),
],
1.0,
)
self.optimizer_v.step()
self.optimizer_a.zero_grad()
loss_a.backward()
if grad_clip:
nn.utils.clip_grad_value_(
[*self.f_s2log_siga.parameters(), *self.f_s2mua.parameters()], 1.0
)
self.optimizer_a.step()
# --------------------------------------------------------------------------
# update entropy coefficient if required
if isinstance(self.beta_h, str):
self.optimizer_e.zero_grad()
loss_e = -torch.mean(
self.log_beta_h * (log_pi_exp + self.target_entropy).data
)
loss_e.backward()
self.optimizer_e.step()
# update target V network
state_dict_tar = self.f_s2v_tar.state_dict()
state_dict = self.f_s2v.state_dict()
for key in list(self.f_s2v.state_dict().keys()):
state_dict_tar[key] = 0.995 * state_dict_tar[key] + 0.005 * state_dict[key]
self.f_s2v_tar.load_state_dict(state_dict_tar)
if computation == "implicit":
return loss_v.item(), loss_a.item(), loss_a.item(), loss_q.item()
elif computation == "explicit":
return (
loss_v.item(),
torch.mean(grad_mua).item(),
torch.mean(grad_siga).item(),
loss_q.item(),
)
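# --- Smoke-test sketch (not part of the original module) ----------------------
# A hedged, minimal check that SAC can take one explicit-computation gradient
# step on random transitions. The batch size and dimensions are illustrative;
# states are (batch, input_size), actions (batch, action_size), and R / D / V
# are (batch, 1), with V acting as a validity mask of ones.
if __name__ == "__main__":
    np.random.seed(0)
    batch, obs_dim, act_dim = 32, 4, 2
    agent = SAC(input_size=obs_dim, action_size=act_dim)
    S = np.random.randn(batch, obs_dim).astype(np.float32)
    SP = np.random.randn(batch, obs_dim).astype(np.float32)
    A = np.tanh(np.random.randn(batch, act_dim)).astype(np.float32)
    R = np.random.randn(batch, 1).astype(np.float32)
    D = np.zeros((batch, 1), dtype=np.float32)   # no terminal transitions
    V = np.ones((batch, 1), dtype=np.float32)    # every transition is valid
    diagnostics = agent.learn(S, SP, R, A, D, V)  # default computation="explicit"
    print("loss_v, mean grad_mua, mean grad_siga, loss_q:", diagnostics)
    print("sampled action for one state:", agent.select(S[0]))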
class SACRNN(nn.Module):
def __init__(
self,
input_size,
action_size,
gamma=0.99,
reward_scale=1,
beta_h="auto_1.0",
shared_layers=256,
output_layers=256,
lr=3e-4,
):
super(SACRNN, self).__init__()
self.input_size = input_size
self.action_size = action_size
self.reward_scale = reward_scale
self.beta_h = beta_h
self.target_entropy = -np.float32(action_size)
if isinstance(self.beta_h, str) and self.beta_h.startswith("auto"):
# Default initial value of beta_h when learned
init_value = 1.0
if "_" in self.beta_h:
init_value = float(self.beta_h.split("_")[1])
assert (
init_value > 0.0
), "The initial value of beta_h must be greater than 0"
self.log_beta_h = torch.tensor(
np.log(init_value).astype(np.float32), requires_grad=True
)
else:
# Force conversion to float
# this will throw an error if a malformed string (different from 'auto')
# is passed
self.beta_h = float(self.beta_h)
self.gamma = gamma
self.shared_layers = shared_layers
self.output_layers = output_layers
# shared lstm
self.rnn = nn.LSTM(
input_size=self.input_size,
hidden_size=self.shared_layers,
num_layers=1,
batch_first=False,
bias=True,
)
self.out_s2mua = nn.Sequential(
nn.Linear(self.shared_layers, self.output_layers),
nn.ReLU(),
nn.Linear(self.output_layers, self.output_layers),
nn.ReLU(),
nn.Linear(self.output_layers, self.action_size),
)
self.out_s2log_siga = nn.Sequential(
nn.Linear(self.shared_layers, self.output_layers),
nn.ReLU(),
nn.Linear(self.output_layers, self.output_layers),
nn.ReLU(),
nn.Linear(self.output_layers, self.action_size),
)
self.out_s2v = nn.Sequential(
nn.Linear(self.shared_layers, self.output_layers),
nn.ReLU(),
nn.Linear(self.output_layers, self.output_layers),
nn.ReLU(),
nn.Linear(self.output_layers, 1),
)
self.out_sa2q1 = nn.Sequential(
nn.Linear(self.shared_layers + self.action_size, self.output_layers),
nn.ReLU(),
nn.Linear(self.output_layers, self.output_layers),
nn.ReLU(),
nn.Linear(self.output_layers, 1),
)
self.out_sa2q2 = nn.Sequential(
nn.Linear(self.shared_layers + self.action_size, self.output_layers),
nn.ReLU(),
nn.Linear(self.output_layers, self.output_layers),
nn.ReLU(),
nn.Linear(self.output_layers, 1),
)
self.out_s2v_tar = nn.Sequential(
nn.Linear(self.shared_layers, self.output_layers),
nn.ReLU(),
nn.Linear(self.output_layers, self.output_layers),
nn.ReLU(),
nn.Linear(self.output_layers, 1),
)
# synchronizing target V network and V network
state_dict_tar = self.out_s2v_tar.state_dict()
state_dict = self.out_s2v.state_dict()
for key in list(self.out_s2v.state_dict().keys()):
state_dict_tar[key] = state_dict[key]
self.out_s2v_tar.load_state_dict(state_dict_tar)
self.optimizer_a = torch.optim.Adam(self.parameters(), lr=lr)
self.optimizer_v = torch.optim.Adam(self.parameters(), lr=lr)
if isinstance(self.beta_h, str):
self.optimizer_e = torch.optim.Adam(
[self.log_beta_h], lr=lr
) # optimizer for beta_h
self.mse_loss = nn.MSELoss()
# hidden states of the networks
self.hc = (None, None)
def get_v(self, S):
pass
def get_q(self, S, A):
pass
def sample_z(self, mu, sig):
# Using reparameterization trick to sample from a gaussian
eps = Variable(torch.randn_like(mu))
return mu + sig * eps
def init_episode(self, S0, action_return="normal"):
        if isinstance(S0, np.ndarray):
            S = torch.from_numpy(S0.astype(np.float32)).view(1, -1, self.input_size)
        else:
            S = S0.view(1, -1, self.input_size)
output, self.hc = self.rnn(S)
mua = self.out_s2mua(output)
siga = torch.exp(self.out_s2log_siga(output).clamp(LOG_STD_MIN, LOG_STD_MAX))
a = np.tanh(self.sample_z(mua, siga).cpu().detach()).numpy()
if action_return == "mean":
return np.tanh(mua.cpu().detach().numpy())
else:
return a
def select(self, S, action_return="normal"):
# output action
if isinstance(S, np.ndarray):
S = torch.from_numpy(S.astype(np.float32)).view(1, -1, self.input_size)
output, self.hc = self.rnn(S, self.hc)
mua = self.out_s2mua(output)
siga = torch.exp(self.out_s2log_siga(output).clamp(LOG_STD_MIN, LOG_STD_MAX))
a = np.tanh(self.sample_z(mua, siga).cpu().detach()).numpy()
if action_return == "mean":
return np.tanh(mua.cpu().detach().numpy())
else:
return a
def learn(
self, S, SP, R, A, D, V, seq_len=64, computation="explicit", grad_clip=False
):
if isinstance(S, np.ndarray):
S = torch.from_numpy(S)
if isinstance(SP, np.ndarray):
SP = torch.from_numpy(SP)
if isinstance(R, np.ndarray):
R = torch.from_numpy(R)
if isinstance(A, np.ndarray):
A = torch.from_numpy(A)
if isinstance(D, np.ndarray):
D = torch.from_numpy(D)
if isinstance(V, np.ndarray):
V = torch.from_numpy(V)
S_sampled = torch.zeros(
[S.size()[0], seq_len, S.size()[-1]], dtype=torch.float32
)
SP_sampled = torch.zeros(
[SP.size()[0], seq_len, SP.size()[-1]], dtype=torch.float32
)
A_sampled = torch.zeros(
[A.size()[0], seq_len, A.size()[-1]], dtype=torch.float32
)
D_sampled = torch.zeros(
[D.size()[0], seq_len, D.size()[-1]], dtype=torch.float32
)
R_sampled = torch.zeros(
[R.size()[0], seq_len, R.size()[-1]], dtype=torch.float32
)
V_sampled = torch.zeros(
[V.size()[0], seq_len, V.size()[-1]], dtype=torch.float32
)
stps_burnin = 40
for b in range(S.size()[0]):
v = V.cpu().numpy().reshape([S.size()[0], S.size()[1]])
stps = np.sum(v[b], axis=0).astype(int)
start_index = np.random.randint(
-seq_len + 1, stps - 1
) # TODO: why sample from negatives?
for tmp, TMP in zip(
(S_sampled, A_sampled, D_sampled, R_sampled, V_sampled, SP_sampled),
(S, A, D, R, V, SP),
):
# select the [max(0, start_idx), min(start_idx+seq_len, stps)] sub-traj
sub_seq_start = max(0, start_index)
sub_seq_end = min(start_index + seq_len, stps)
tmp[b, : sub_seq_end - sub_seq_start] = TMP[
b, sub_seq_start:sub_seq_end
]
init_hc = (
torch.zeros([1, S.size()[0], self.shared_layers], dtype=torch.float32),
torch.zeros([1, S.size()[0], self.shared_layers], dtype=torch.float32),
)
if start_index < 1:
pass
else: # use the previous stps_burnin sub-traj before selected sub-traj as burn-in
_, hcs = self.rnn(
S[
b : b + 1, max(0, start_index - stps_burnin) : start_index
].transpose(0, 1)
)
init_hc[0][:, b, :] = hcs[0][:, 0, :]
init_hc[1][:, b, :] = hcs[1][:, 0, :]
S_sampled = S_sampled.transpose(
0, 1
).data # new shape: num_steps x batch_size x n_neurons
SP_sampled = SP_sampled.transpose(0, 1).data
A_sampled = A_sampled.transpose(0, 1).data
R_sampled = R_sampled.transpose(0, 1).data
V_sampled = V_sampled.transpose(0, 1).data
D_sampled = D_sampled.transpose(0, 1).data
gamma = self.gamma
reward_scale = self.reward_scale
if isinstance(self.beta_h, str):
beta_h = torch.exp(self.log_beta_h).data
else:
beta_h = np.float32(self.beta_h)
SS_sampled = torch.cat((S_sampled[0:1], SP_sampled), dim=0)
output, _ = self.rnn(
SS_sampled, init_hc
        )  # NOTE: no reward or action input to the RNN!
q_tensor_1 = self.out_sa2q1(torch.cat((output[:-1], A_sampled), dim=-1))
q_tensor_2 = self.out_sa2q2(torch.cat((output[:-1], A_sampled), dim=-1))
mua_tensor = self.out_s2mua(output[:-1])
siga_tensor = torch.exp(
self.out_s2log_siga(output[:-1]).clamp(LOG_STD_MIN, LOG_STD_MAX)
)
v_tensor = self.out_s2v(output[:-1])
vp_tensor = self.out_s2v_tar(output[1:])
# ------ explicit computing---------------
if computation == "explicit":
# ------ loss_v ---------------
sampled_u = self.sample_z(mua_tensor.data, siga_tensor.data).data
sampled_a = torch.tanh(sampled_u)
output, _ = self.rnn(S_sampled)
sampled_q1 = self.out_sa2q1(torch.cat((output, sampled_a), dim=-1)).data
sampled_q2 = self.out_sa2q2(torch.cat((output, sampled_a), dim=-1)).data
sampled_q = torch.min(sampled_q1, sampled_q2)
q_exp = sampled_q
log_pi_exp = torch.sum(
-(mua_tensor.data - sampled_u.data).pow(2)
/ (siga_tensor.data.pow(2))
/ 2
- torch.log(siga_tensor.data * torch.tensor(2.5066)),
dim=-1,
keepdim=True,
)
log_pi_exp -= torch.sum(
torch.log(1.0 - sampled_a.pow(2) + EPS), dim=-1, keepdim=True
)
v_tar = (q_exp - beta_h * log_pi_exp.data).detach().data
loss_v = 0.5 * self.mse_loss(v_tensor * V_sampled, v_tar * V_sampled)
loss_q = 0.5 * self.mse_loss(
q_tensor_1 * V_sampled,
(
reward_scale * R_sampled
+ (1 - D_sampled) * gamma * vp_tensor.detach().data
)
* V_sampled,
) + 0.5 * self.mse_loss(
q_tensor_2 * V_sampled,
(
reward_scale * R_sampled
+ (1 - D_sampled) * gamma * vp_tensor.detach().data
)
* V_sampled,
)
loss_critic = loss_v + loss_q
# -------- loss_a ---------------
sampled_u = Variable(
self.sample_z(mua_tensor.data, siga_tensor.data), requires_grad=True
)
sampled_a = torch.tanh(sampled_u)
output, _ = self.rnn(S_sampled)
sampled_q1 = self.out_sa2q1(torch.cat((output, sampled_a), dim=-1))
sampled_q2 = self.out_sa2q2(torch.cat((output, sampled_a), dim=-1))
Q_tmp = torch.min(sampled_q1, sampled_q2)
Q_tmp.backward(torch.ones_like(Q_tmp))
PQPU = sampled_u.grad.data # \frac{\partial Q}{\partial a}
eps = (sampled_u.data - mua_tensor.data) / (
siga_tensor.data
) # action noise quantity
a = sampled_a.data # action quantity
grad_mua = (beta_h * (2 * a) - PQPU).data * V_sampled.repeat_interleave(
a.size()[-1], dim=-1
) + REG * mua_tensor * V_sampled.repeat_interleave(a.size()[-1], dim=-1)
grad_siga = (
-beta_h / (siga_tensor.data + EPS) + 2 * beta_h * a * eps - PQPU * eps
).data * V_sampled.repeat_interleave(
a.size()[-1], dim=-1
) + REG * siga_tensor * V_sampled.repeat_interleave(
a.size()[-1], dim=-1
)
self.optimizer_v.zero_grad()
loss_critic.backward(retain_graph=True)
if grad_clip:
nn.utils.clip_grad_norm_(self.parameters(), 1.0)
self.optimizer_v.step()
self.optimizer_a.zero_grad()
mua_tensor.backward(
grad_mua / torch.ones_like(mua_tensor, dtype=torch.float32).sum(),
retain_graph=True,
)
siga_tensor.backward(
grad_siga / torch.ones_like(siga_tensor, dtype=torch.float32).sum()
)
if grad_clip:
nn.utils.clip_grad_value_(self.parameters(), 1.0)
self.optimizer_a.step()
# Using Torch API for computing log_prob
# ------ implicit computing---------------
elif computation == "implicit":
# --------- loss_v ------------
mu_prob = dis.Normal(mua_tensor, siga_tensor)
sampled_u = mu_prob.sample()
sampled_a = torch.tanh(sampled_u)
log_pi_exp = torch.sum(
mu_prob.log_prob(sampled_u), dim=-1, keepdim=True
) - torch.sum(torch.log(1 - sampled_a.pow(2) + EPS), dim=-1, keepdim=True)
output, _ = self.rnn(S_sampled)
sampled_q1 = self.out_sa2q1(torch.cat((output, sampled_a), dim=-1)).data
sampled_q2 = self.out_sa2q2(torch.cat((output, sampled_a), dim=-1)).data
sampled_q = torch.min(sampled_q1, sampled_q2)
q_exp = sampled_q
v_tar = (q_exp - beta_h * log_pi_exp.data).detach().data
loss_v = 0.5 * self.mse_loss(v_tensor * V_sampled, v_tar * V_sampled)
# NOTE: this masked method uses constant denominator, which is inaccurate
loss_q = 0.5 * self.mse_loss(
q_tensor_1 * V_sampled,
(
reward_scale * R_sampled
+ (1 - D_sampled) * gamma * vp_tensor.detach().data
)
* V_sampled,
) + 0.5 * self.mse_loss(
q_tensor_2 * V_sampled,
(
reward_scale * R_sampled
+ (1 - D_sampled) * gamma * vp_tensor.detach().data
)
* V_sampled,
)
loss_critic = loss_v + loss_q
# ----------- loss_a ---------------
mu_prob = dis.Normal(mua_tensor, siga_tensor)
sampled_u = mu_prob.rsample()
sampled_a = torch.tanh(sampled_u)
output, _ = self.rnn(S_sampled)
sampled_q1 = self.out_sa2q1(torch.cat((output, sampled_a), dim=-1))
sampled_q2 = self.out_sa2q2(torch.cat((output, sampled_a), dim=-1))
log_pi_exp = torch.sum(
mu_prob.log_prob(sampled_u), dim=-1, keepdim=True
) - torch.sum(torch.log(1 - sampled_a.pow(2) + EPS), dim=-1, keepdim=True)
loss_a = torch.mean(
beta_h * log_pi_exp * V_sampled
- torch.min(sampled_q1, sampled_q2) * V_sampled
) + REG / 2 * (
torch.mean(
(
siga_tensor
* V_sampled.repeat_interleave(siga_tensor.size()[-1], dim=-1)
).pow(2)
)
+ torch.mean(
(
mua_tensor
* V_sampled.repeat_interleave(mua_tensor.size()[-1], dim=-1)
).pow(2)
)
)
total_loss = loss_critic + loss_a
self.optimizer_v.zero_grad()
total_loss.backward()
if grad_clip:
nn.utils.clip_grad_norm_(self.parameters(), 1.0)
nn.utils.clip_grad_value_(self.parameters(), 1.0)
self.optimizer_v.step()
# --------------------------------------------------------------------------
# update entropy coefficient if required
if isinstance(self.beta_h, str):
self.optimizer_e.zero_grad()
            # NOTE: log_pi_exp is not masked! It still contains invalid (padded) items.
loss_e = -torch.mean(
self.log_beta_h * (log_pi_exp + self.target_entropy).data
)
loss_e.backward()
self.optimizer_e.step()
# update target V network
state_dict_tar = self.out_s2v_tar.state_dict()
state_dict = self.out_s2v.state_dict()
for key in list(self.out_s2v.state_dict().keys()):
state_dict_tar[key] = 0.995 * state_dict_tar[key] + 0.005 * state_dict[key]
self.out_s2v_tar.load_state_dict(state_dict_tar)
if computation == "implicit":
return loss_v.item(), loss_a.item(), loss_a.item(), loss_q.item()
elif computation == "explicit":
return (
loss_v.item(),
torch.mean(grad_mua).item(),
torch.mean(grad_siga).item(),
loss_q.item(),
)
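# --- Rollout sketch (not part of the original module) -------------------------
# A hedged example of driving the recurrent agent for a few steps with random
# observations; obs_dim, act_dim and the number of steps are illustrative only.
if __name__ == "__main__":
    obs_dim, act_dim = 4, 2
    rnn_agent = SACRNN(input_size=obs_dim, action_size=act_dim)
    obs = np.random.randn(obs_dim).astype(np.float32)
    action = rnn_agent.init_episode(obs)          # resets the LSTM hidden state
    for _ in range(5):
        obs = np.random.randn(obs_dim).astype(np.float32)
        action = rnn_agent.select(obs)            # reuses the stored hidden state
    print("last action shape:", action.shape)     # (1, 1, act_dim)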
|
import os
import sys
if __name__:
sys.path.append(os.path.dirname(
os.path.abspath(os.path.dirname(__file__))))
from utils.util import *
class LockerFrame(tk.Frame):
STATE_WAIT = "W"
STATE_USED = "U"
STATE_BROKEN = "B"
STATE_KIOSK = "K"
DEFAULT_MODE = 0
FIX_MODE = 1
UNLOCK_MODE = 2
def __init__(self, parent, controller, page, mode=0, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
self.parent = parent
self.controller = controller
self.page = page
self.mode = mode
self.size = None
self.color_dict = {
f"{self.STATE_WAIT}": ("#A93226", "#CD6155") if self.page == "FindPage" else ("#385ab7", "#496bc9") if self.mode == self.UNLOCK_MODE else ("#1E8449", "#2ECC71"),
f"{self.STATE_USED}": ("#1E8449", "#2ECC71") if self.page == "FindPage" else ("#385ab7", "#496bc9") if self.mode == self.UNLOCK_MODE else ("#A93226", "#CD6155"),
f"{self.STATE_BROKEN}": ("#7C7877", "#7C7877"),
f"{self.STATE_KIOSK}": ("#7C7877", "#7C7877")
}
self.button_dict = {}
self.__show_locker()
def __show_locker(self):
"""
        Displays the lockers by reading the JSON file.
        Since they are laid out with grid, this cannot be used while the frame is already placed with pack.
"""
try:
with open("data/information.json") as f:
import json
json_object = json.load(f)
locker_list = sorted(
json_object["CRRInfo"], key=lambda dic: dic["location"]["start"]["row"]
)
self.size = (json_object["LCKSize"]["width"],
json_object["LCKSize"]["height"])
for json_data in locker_list:
self.__make_locker_button(json_data)
except Exception as e:
raise e
def __make_locker_button(self, json_data):
"""
        Creates and displays buttons from the JSON data.
        Three kinds of buttons can be created.
        If the locker (parcel box) is broken
            a gray locker button is created; pressing it shows a warning that it cannot be used.
        If the locker (parcel box) is not broken
        - it is shown differently depending on the page.
            a red locker button is created; pressing it shows a warning that it cannot be used.
            a green locker button is created; pressing it moves on to the usage window.
"""
locker_width, locker_height = self.size
img_size = 170 - 40*(max(locker_height, locker_width)-1)
button_size = 250 - 50*(max(locker_width, locker_height)-1)
text_font = tkfont.Font(
family="a시월구일1",
size=20-2*(max(locker_width, locker_height)-1),
weight="bold")
locker_image = ImageTk.PhotoImage(Image.open(
"../img/lockers.png" if __name__ == "__main__" or __name__ == "locker_frame" else "src/img/lockers.png"
).resize((img_size, img_size)))
kiosk_image = ImageTk.PhotoImage(Image.open(
"../img/kiosk.png" if __name__ == "__main__" or __name__ == "locker_frame" else "src/img/kiosk.png"
).resize((img_size, img_size)))
location = json_data["location"]
width = location["width"]
height = location["height"]
state = json_data["useState"]
locker_number = json_data["CRRNo"]
CRRMngKey = json_data["CRRMngKey"]
def decide_function():
"""
            Picks the appropriate callback depending on which page the locker is on and what its state value is
"""
# useState == 'U' when FindPage, useState == 'W' when DeliveryPage
            if (state == self.STATE_USED and self.page == "FindPage") or (state == self.STATE_WAIT and self.page == "DeliveryPage"):
return lambda CRRMngKey=CRRMngKey: self.controller.show_frame("InformationPage", frame=self.parent, CRRMngKey=CRRMngKey, mode=0, page=self.page)
elif self.page == "SettingPage":
if state == self.STATE_KIOSK:
return
elif self.mode == self.FIX_MODE:
return lambda CRRMngKey=CRRMngKey: self.parent.set_locker(CRRMngKey, state, locker_number)
elif self.mode == self.UNLOCK_MODE:
return lambda CRRMngKey=CRRMngKey: self.parent.force_open_door(CRRMngKey)
# useState == 'B' or 'U' when deliveryPage, 'W' when FindPage
else:
return lambda: MessageFrame(self.controller, "해당 함을 사용할 수 없습니다.")
button = SMLButton(master=self,
fg_color=self.color_dict[json_data["useState"]][0],
hover_color=self.color_dict[json_data["useState"]][1],
image=locker_image if json_data["useState"] != self.STATE_KIOSK else kiosk_image,
border_width=1,
corner_radius=10,
text=locker_number if json_data["useState"] != self.STATE_KIOSK else "",
text_font=text_font,
width=button_size*width,
height=button_size*height,
command=decide_function()
)
button.grid(row=location["start"]["row"], column=location["start"]
["col"], rowspan=height, columnspan=width)
self.button_dict[locker_number] = button
|
""" This is a function for enabling data capture.
Checks these 2 things of the input data:
- features (aka input schema)
- descriptive statistics about input features
"""
from urllib.parse import urlparse
from time import gmtime, strftime, sleep
import time
from threading import Thread
import boto3
import pandas as pd
from sagemaker import session
from sagemaker import RealTimePredictor
from sagemaker.model_monitor import (
CronExpressionGenerator,
DefaultModelMonitor,
DataCaptureConfig,
)
from sagemaker import get_execution_role
from sagemaker.model_monitor.dataset_format import DatasetFormat
sm_client = boto3.client("sagemaker")
sm_session = session.Session(boto3.Session())
s3_client = boto3.Session().client("s3")
role = "${module.step-functions.iam_role_arn}"
# give a name to the data drift monitor job
mon_schedule_name = "${var.data_drift_monitor_name}"
endpoint_name = "${var.endpoint_name}"
frequency = "${var.data_drift_monitoring_frequency}"
# define a url path for the captured data output
s3_capture_upload_path = (
"s3://${aws_s3_bucket.ml_bucket[0].id}/monitor_output/endpoint-data-capture"
)
# define the url path for train data which is the baseline data
baseline_data_uri = "s3://${aws_s3_bucket.ml_bucket[0].id}/" + "${var.train_key}"
baseline_results_uri = (
"s3://${aws_s3_bucket.ml_bucket[0].id}/monitor_output/baseline-results"
)
# define an url for the data drift monitor report
s3_report_path = (
"s3://${aws_s3_bucket.ml_bucket[0].id}/monitor_output/data-drift-monitor-results"
)
# you can also choose hourly, or daily_every_x_hours(hour_interval, starting_hour=0)
def monitor_frequency(interval=frequency, hour_interval=None, starting_hour=None):
# this allows users to define the frequency of data drift monitoring
if interval == "daily":
monitoring_frequency = CronExpressionGenerator.daily()
if interval == "hourly":
monitoring_frequency = CronExpressionGenerator.hourly()
if interval == "others":
monitoring_frequency = CronExpressionGenerator.daily_every_x_hours(
hour_interval, starting_hour
)
return monitoring_frequency
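# Hedged usage note: with frequency="others", the schedule could be built as e.g.
# monitor_frequency("others", hour_interval=6, starting_hour=0); the 6-hour
# interval and starting hour are illustrative values only.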
# Change the parameters below as you like - adjust the sampling percentage and
# choose to capture the request, the response, or both.
data_capture_config = DataCaptureConfig(
enable_capture=True,
sampling_percentage="${var.data_drift_sampling_percent}",
destination_s3_uri=s3_capture_upload_path,
kms_key_id=None,
capture_options=["REQUEST", "RESPONSE"],
csv_content_types=["text/csv"],
json_content_types=["application/json"],
)
# Now it is time to apply the new configuration and wait for it to be applied
predictor = RealTimePredictor(endpoint=endpoint_name)
predictor.update_data_capture_config(data_capture_config=data_capture_config)
sm_session.wait_for_endpoint(endpoint=endpoint_name)
my_default_monitor = DefaultModelMonitor(
role=role,
instance_count="${var.training_job_instance_count}",
instance_type="${var.training_job_instance_type}",
    # NOTE: this interpolates the training-job instance *type* into volume_size_in_gb;
    # it should almost certainly reference a dedicated volume-size variable instead.
    volume_size_in_gb="${var.training_job_instance_type}",
max_runtime_in_seconds="${var.data_drift_job_timeout_in_sec}",
)
# now ask Sagemaker to suggest baseline stats
my_default_monitor.suggest_baseline(
baseline_dataset=baseline_data_uri + "/train.csv",
dataset_format=DatasetFormat.csv(header=True),
output_s3_uri=baseline_results_uri,
wait=True,
)
my_default_monitor.create_monitoring_schedule(
monitor_schedule_name=mon_schedule_name,
endpoint_input=predictor.endpoint,
output_s3_uri=s3_report_path,
statistics=my_default_monitor.baseline_statistics(),
constraints=my_default_monitor.suggested_constraints(),
    schedule_cron_expression=monitor_frequency(),
enable_cloudwatch_metrics=True,
)
baseline_job = my_default_monitor.latest_baselining_job
schema_df = pd.io.json.json_normalize(
baseline_job.baseline_statistics().body_dict["features"]
)
constraints_df = pd.io.json.json_normalize(
baseline_job.suggested_constraints().body_dict["features"]
)
desc_schedule_result = my_default_monitor.describe_schedule()
mon_executions = my_default_monitor.list_executions()
def lambda_handler(event, context):
"""
Inspect a specific execution (latest execution).
Here are the possible terminal states and what each of them mean:
- Completed - This means the monitoring execution completed and no issues were found in the violations report.
- CompletedWithViolations - This means the execution completed, but constraint violations were detected.
    - Failed - The monitoring execution failed, maybe due to a client error (perhaps incorrect role permissions) or infrastructure issues. Further examination of FailureReason and ExitMessage is necessary to identify what exactly happened.
- Stopped - job exceeded max runtime or was manually stopped.
"""
# latest execution's index is -1, second to last is -2 and so on..
latest_execution = mon_executions[-1]
time.sleep(60)
latest_execution.wait(logs=False)
latest_job = latest_execution.describe()
report_uri = latest_execution.output.destination
# list the generated reports
s3uri = urlparse(report_uri)
report_bucket = s3uri.netloc
report_key = s3uri.path.lstrip("/")
result = s3_client.list_objects(Bucket=report_bucket, Prefix=report_key)
report_files = [report_file.get("Key") for report_file in result.get("Contents")]
# get the latest violations report
latest_monitoring_violations = (
my_default_monitor.latest_monitoring_constraint_violations()
)
pd.set_option("display.max_colwidth", -1)
constraints_violations_df = pd.io.json.json_normalize(
latest_monitoring_violations.body_dict["violations"]
)
# get the latest violation stats
latest_monitoring_statistics = my_default_monitor.latest_monitoring_statistics()
# get the status of the latest execution result
latest_result_status = latest_execution.describe()["ExitMessage"]
# Delete the resources after running the inspection to avoid incurring additional charges
my_default_monitor.delete_monitoring_schedule()
time.sleep(60) # actually wait for the deletion
predictor.delete_endpoint()
predictor.delete_model()
    # Return a dict (a set literal would fail here: DataFrames are unhashable,
    # and Lambda return values should be key/value pairs anyway).
    return {
        "latest_result_status": latest_result_status,
        "report_uri": report_uri,
        "constraints_violations": constraints_violations_df,
        "latest_monitoring_statistics": latest_monitoring_statistics,
    }
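# Hedged note: in production this handler is triggered by AWS Lambda; for a local
# dry run, something like `lambda_handler({}, None)` would inspect the latest
# monitoring execution and then tear down the schedule, endpoint and model.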
|
import json
import logging.config
import os
def load_config(filepath):
"""Config dictionary
Args:
filepath (str):
Returns:
dict:
Raises:
ImportError:
FileNotFoundError:
"""
if os.path.exists(filepath):
_, ext = os.path.splitext(filepath)
if ext == '.json':
with open(filepath, 'rt') as fp:
return json.load(fp)
elif ext in ('.yml', '.yaml'):
            try:
                from ruamel import yaml
            except ImportError:
                try:
                    import yaml
                except ImportError:
                    raise ImportError('Install ruamel.yaml or pyyaml in '
                                      'order to load a dictionary '
                                      'configuration from a yaml file.')
with open(filepath, 'rt') as fp:
return yaml.safe_load(fp.read())
raise ValueError('File extension {ext} is not supported.'.format(
ext=ext))
else:
raise FileNotFoundError(
'Configuration file: "{path}" doesn\'t exist.'.format(
path=filepath))
def setup_logging(path_or_config, logdir='.logs', env_key='LOG_CFG'):
"""Setup logging configurations defined by dict configuration.
Args:
path_or_config (str|dict):
- dict: Dictionary config
- str: Path to load dictionary configuration from. Can be json or yaml file.
logdir (str|None):
- None: Saves logfiles to current working directory
- str: Saves logfiles to specified directory.
env_key (str):
Environment key for setting path to logging conf.
References:
- https://fangpenlin.com/posts/2012/08/26/good-logging-practice-in-python/
Todo:
- Add support for .ini and .cfg files
"""
path_or_config = os.getenv(env_key, path_or_config)
if isinstance(path_or_config, str):
config = load_config(path_or_config)
elif isinstance(path_or_config, dict):
config = path_or_config
else:
raise TypeError("Argument 'path_or_config' should be string or "
"dictionary.")
# Configure directory to save logfiles
if logdir:
        # Create directory if it doesn't already exist.
try:
os.makedirs(logdir, 0o700, exist_ok=True)
except FileExistsError:
pass
# Prepend directory path to filenames.
for name in config['handlers']:
handler = config['handlers'][name]
if 'filename' in handler:
handler['filename'] = os.path.join(logdir, handler['filename'])
# Configure logging for config dictionary
logging.config.dictConfig(config)
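# --- Usage sketch (not part of the original module) ---------------------------
# A hedged, minimal dictConfig-style configuration showing how setup_logging()
# prepends `logdir` to file-handler paths; handler and format values below are
# illustrative only.
if __name__ == "__main__":
    example_config = {
        "version": 1,
        "formatters": {"plain": {"format": "%(asctime)s %(levelname)s %(message)s"}},
        "handlers": {
            "file": {
                "class": "logging.FileHandler",
                "formatter": "plain",
                "filename": "app.log",   # rewritten to .logs/app.log by setup_logging
            },
        },
        "root": {"level": "INFO", "handlers": ["file"]},
    }
    setup_logging(example_config)
    logging.getLogger(__name__).info("logging configured")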
|
import os
import yaml
# Set defaults
system = dict(
use_acct_db = False,
use_cors = False,
debug = False,
host = '0.0.0.0',
port = 5000,
serve_index = False,
archive_usage_load = False,
archive_wait_time = 60,
archive_path = 'db.json',
)
# Change to yaml values
ymlfile = None
if os.path.exists("config.yml"):
ymlfile = open("config.yml", 'r')
elif 'SLURM_REST_API_CONFIG' in os.environ and os.path.exists(os.environ['SLURM_REST_API_CONFIG']):
ymlfile = open(os.environ['SLURM_REST_API_CONFIG'], 'r')
if ymlfile is not None:
try:
        cfg = yaml.safe_load(ymlfile)
if cfg is not None and 'system' in cfg:
for n in cfg['system']:
system[n] = cfg['system'][n]
finally:
ymlfile.close()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from bioseq.models.Variant import Variant
from pdbdb.models import Residue, PDBResidueSet, Property, ResidueSet
class PDBVariant(models.Model):
variant = models.ForeignKey(Variant, models.CASCADE, "pdb_variants")
residue = models.ForeignKey(Residue, models.CASCADE, "pdb_variants")
class Meta:
managed = True
db_table = 'pdbvariant'
def ann(self):
data = []
for pdb_rs in self.residue.residue_sets.all():
# TODO Breaks encapsulation from pocket residue set
if pdb_rs.pdbresidue_set.residue_set.name == PDBResidueSet.pocket_name:
rs_ann = f'In pocket {pdb_rs.pdbresidue_set.name} with druggability {pdb_rs.pdbresidue_set.properties_dict()[Property.druggability]}'
elif ResidueSet.BINDING_SITE in pdb_rs.pdbresidue_set.residue_set.name:
# bl = pdb_rs.pdbresidue_set.properties_dict().get(Property.bound_ligand, "")
# bl = f' bound to {bl}' if bl else ""
# rs_ann = f'{pdb_rs.pdbresidue_set.residue_set.name} {pdb_rs.pdbresidue_set.name} {bl}'
rs_ann = pdb_rs.pdbresidue_set.description
else:
rs_ann = f'{pdb_rs.pdbresidue_set.residue_set.name} {pdb_rs.pdbresidue_set.name}'
data.append(
{"name": pdb_rs.pdbresidue_set.name, "type": pdb_rs.pdbresidue_set.residue_set.name, "desc": rs_ann})
return data
|
import io
from codenode.base import CodeNode
from typing import Union
DumpStream = Union[io.StringIO, io.TextIOBase]
class CodeNodeWriter:
def node_to_lines(self, node: CodeNode):
stack = [(node, 0, node.total())]
while stack:
node, depth, iterator = stack[-1]
try:
item = next(iterator)
except StopIteration:
stack.pop()
continue
if isinstance(item, CodeNode):
stack.append(
(item, depth+node.child_depth_offset, item.total())
)
else:
yield depth, item
def dump(
self,
node: CodeNode,
stream: DumpStream,
indent=None,
base_depth=0
):
if indent is None:
from codenode.util.constants import default_indent
indent = default_indent
for depth, line in node.to_lines():
stream.write(indent*(depth+base_depth))
stream.write(line)
stream.write('\n')
def dumps(
self,
node: CodeNode,
indent=None,
base_depth=0
):
string_io = io.StringIO()
self.dump(node, string_io, indent, base_depth)
return string_io.getvalue()
default_writer = CodeNodeWriter()
|
import re
from datetime import date, datetime
from gazette.items import Gazette
from gazette.spiders.base import BaseGazetteSpider
class SpSumareSpider(BaseGazetteSpider):
TERRITORY_ID = "3552403"
allowed_domains = ["sumare.sp.gov.br"]
name = "sp_sumare"
start_urls = ["https://www.sumare.sp.gov.br/Diario.Oficial.php?edicao=todas"]
base_url = "https://www.sumare.sp.gov.br/"
start_date = date(2011, 2, 11)
end_date = date.today()
def parse(self, response):
gazettes = response.css("li.umDO")
gazettes.reverse()
for gazette in gazettes:
title = gazette.css("a::attr(title)").get()
url = gazette.css("a::attr(href)").get()
str_date = re.search(r"\d+/\d+/\d{4}", title).group(0)
            gazette_date = datetime.strptime(str_date, "%d/%m/%Y").date()  # avoid shadowing datetime.date
            if not (self.start_date <= gazette_date <= self.end_date):
                continue
            yield Gazette(
                edition_number=re.search(r"\d+", title).group(0),
                date=gazette_date,
file_urls=[f"{self.base_url}{url}"],
is_extra_edition="extra" in title.lower(),
power="executive_legislative",
)
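# --- Usage sketch (not part of the original spider) ----------------------------
# Hedged example of running the spider programmatically; in a Scrapy project it
# would normally be launched with `scrapy crawl sp_sumare`, and project settings
# (item pipelines, FILES_STORE, etc.) are assumed to be configured elsewhere.
if __name__ == "__main__":
    from scrapy.crawler import CrawlerProcess
    process = CrawlerProcess(settings={"LOG_LEVEL": "INFO"})
    process.crawl(SpSumareSpider)
    process.start()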
|
"""
Admin
"""
from django.contrib import admin
from .models import Quiz, Question, Result
admin.site.register(Quiz)
admin.site.register(Question)
admin.site.register(Result)
|
"""
Helper functions for creating Form classes from Django models
and database field objects.
"""
from collections import OrderedDict
from itertools import chain
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError,
)
from django.forms.fields import ChoiceField, Field
from django.forms.forms import BaseForm, DeclarativeFieldsMetaclass
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import (
HiddenInput, MultipleHiddenInput, SelectMultiple,
)
from django.utils.text import capfirst, get_text_list
from django.utils.translation import gettext, gettext_lazy as _
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'ModelChoiceField', 'ModelMultipleChoiceField', 'ALL_FIELDS',
'BaseModelFormSet', 'modelformset_factory', 'BaseInlineFormSet',
'inlineformset_factory', 'modelform_factory',
)
ALL_FIELDS = '__all__'
def construct_instance(form, instance, fields=None, exclude=None):
"""
Construct and return a model instance from the bound ``form``'s
``cleaned_data``, but do not save the returned instance to the database.
"""
from django.db import models
opts = instance._meta
cleaned_data = form.cleaned_data
file_field_list = []
for f in opts.fields:
if not f.editable or isinstance(f, models.AutoField) \
or f.name not in cleaned_data:
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
# Leave defaults for fields that aren't in POST data, except for
# checkbox inputs because they don't appear in POST data if not checked.
if (f.has_default() and
form[f.name].field.widget.value_omitted_from_data(form.data, form.files, form.add_prefix(f.name))):
continue
# Defer saving file-type fields until after the other fields, so a
# callable upload_to can use the values from other fields.
if isinstance(f, models.FileField):
file_field_list.append(f)
else:
f.save_form_data(instance, cleaned_data[f.name])
for f in file_field_list:
f.save_form_data(instance, cleaned_data[f.name])
return instance
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
"""
Return a dict containing the data in ``instance`` suitable for passing as
a Form's ``initial`` keyword argument.
``fields`` is an optional list of field names. If provided, return only the
named.
``exclude`` is an optional list of field names. If provided, exclude the
named from the returned dict, even if they are listed in the ``fields``
argument.
"""
opts = instance._meta
data = {}
for f in chain(opts.concrete_fields, opts.private_fields, opts.many_to_many):
if not getattr(f, 'editable', False):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
data[f.name] = f.value_from_object(instance)
return data
def apply_limit_choices_to_to_formfield(formfield):
"""Apply limit_choices_to to the formfield's queryset if needed."""
if hasattr(formfield, 'queryset') and hasattr(formfield, 'get_limit_choices_to'):
limit_choices_to = formfield.get_limit_choices_to()
if limit_choices_to is not None:
formfield.queryset = formfield.queryset.complex_filter(limit_choices_to)
def fields_for_model(model, fields=None, exclude=None, widgets=None,
formfield_callback=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
field_classes=None, *, apply_limit_choices_to=True):
"""
Return an ``OrderedDict`` containing form fields for the given model.
``fields`` is an optional list of field names. If provided, return only the
named fields.
``exclude`` is an optional list of field names. If provided, exclude the
named fields from the returned fields, even if they are listed in the
``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``localized_fields`` is a list of names of fields which should be localized.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
``apply_limit_choices_to`` is a boolean indicating if limit_choices_to
should be applied to a field's queryset.
"""
field_list = []
ignored = []
opts = model._meta
# Avoid circular import
from django.db.models.fields import Field as ModelField
sortable_private_fields = [f for f in opts.private_fields if isinstance(f, ModelField)]
for f in sorted(chain(opts.concrete_fields, sortable_private_fields, opts.many_to_many)):
if not getattr(f, 'editable', False):
if (fields is not None and f.name in fields and
(exclude is None or f.name not in exclude)):
raise FieldError(
"'%s' cannot be specified for %s model form as it is a non-editable field" % (
f.name, model.__name__)
)
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
kwargs = {}
if widgets and f.name in widgets:
kwargs['widget'] = widgets[f.name]
if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields):
kwargs['localize'] = True
if labels and f.name in labels:
kwargs['label'] = labels[f.name]
if help_texts and f.name in help_texts:
kwargs['help_text'] = help_texts[f.name]
if error_messages and f.name in error_messages:
kwargs['error_messages'] = error_messages[f.name]
if field_classes and f.name in field_classes:
kwargs['form_class'] = field_classes[f.name]
if formfield_callback is None:
formfield = f.formfield(**kwargs)
elif not callable(formfield_callback):
raise TypeError('formfield_callback must be a function or callable')
else:
formfield = formfield_callback(f, **kwargs)
if formfield:
if apply_limit_choices_to:
apply_limit_choices_to_to_formfield(formfield)
field_list.append((f.name, formfield))
else:
ignored.append(f.name)
field_dict = OrderedDict(field_list)
if fields:
field_dict = OrderedDict(
[(f, field_dict.get(f)) for f in fields
if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
)
return field_dict
class ModelFormOptions:
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.widgets = getattr(options, 'widgets', None)
self.localized_fields = getattr(options, 'localized_fields', None)
self.labels = getattr(options, 'labels', None)
self.help_texts = getattr(options, 'help_texts', None)
self.error_messages = getattr(options, 'error_messages', None)
self.field_classes = getattr(options, 'field_classes', None)
class ModelFormMetaclass(DeclarativeFieldsMetaclass):
def __new__(mcs, name, bases, attrs):
base_formfield_callback = None
for b in bases:
if hasattr(b, 'Meta') and hasattr(b.Meta, 'formfield_callback'):
base_formfield_callback = b.Meta.formfield_callback
break
formfield_callback = attrs.pop('formfield_callback', base_formfield_callback)
new_class = super(ModelFormMetaclass, mcs).__new__(mcs, name, bases, attrs)
if bases == (BaseModelForm,):
return new_class
opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
# We check if a string was passed to `fields` or `exclude`,
# which is likely to be a mistake where the user typed ('foo') instead
# of ('foo',)
for opt in ['fields', 'exclude', 'localized_fields']:
value = getattr(opts, opt)
if isinstance(value, str) and value != ALL_FIELDS:
msg = ("%(model)s.Meta.%(opt)s cannot be a string. "
"Did you mean to type: ('%(value)s',)?" % {
'model': new_class.__name__,
'opt': opt,
'value': value,
})
raise TypeError(msg)
if opts.model:
# If a model is defined, extract form fields from it.
if opts.fields is None and opts.exclude is None:
raise ImproperlyConfigured(
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form %s "
"needs updating." % name
)
if opts.fields == ALL_FIELDS:
# Sentinel for fields_for_model to indicate "get the list of
# fields from the model"
opts.fields = None
fields = fields_for_model(
opts.model, opts.fields, opts.exclude, opts.widgets,
formfield_callback, opts.localized_fields, opts.labels,
opts.help_texts, opts.error_messages, opts.field_classes,
# limit_choices_to will be applied during ModelForm.__init__().
apply_limit_choices_to=False,
)
# make sure opts.fields doesn't specify an invalid field
none_model_fields = {k for k, v in fields.items() if not v}
missing_fields = none_model_fields.difference(new_class.declared_fields)
if missing_fields:
message = 'Unknown field(s) (%s) specified for %s'
message = message % (', '.join(missing_fields),
opts.model.__name__)
raise FieldError(message)
# Override default model fields with any custom declared ones
# (plus, include all the other declared fields).
fields.update(new_class.declared_fields)
else:
fields = new_class.declared_fields
new_class.base_fields = fields
return new_class
class BaseModelForm(BaseForm):
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=None,
empty_permitted=False, instance=None, use_required_attribute=None,
renderer=None):
opts = self._meta
if opts.model is None:
raise ValueError('ModelForm has no model class specified.')
if instance is None:
# if we didn't get an instance, instantiate a new one
self.instance = opts.model()
object_data = {}
else:
self.instance = instance
object_data = model_to_dict(instance, opts.fields, opts.exclude)
# if initial was provided, it should override the values from instance
if initial is not None:
object_data.update(initial)
# self._validate_unique will be set to True by BaseModelForm.clean().
# It is False by default so overriding self.clean() and failing to call
# super will stop validate_unique from being called.
self._validate_unique = False
super().__init__(
data, files, auto_id, prefix, object_data, error_class,
label_suffix, empty_permitted, use_required_attribute=use_required_attribute,
renderer=renderer,
)
for formfield in self.fields.values():
apply_limit_choices_to_to_formfield(formfield)
def _get_validation_exclusions(self):
"""
For backwards-compatibility, exclude several types of fields from model
validation. See tickets #12507, #12521, #12553.
"""
exclude = []
# Build up a list of fields that should be excluded from model field
# validation and unique checks.
for f in self.instance._meta.fields:
field = f.name
# Exclude fields that aren't on the form. The developer may be
# adding these values to the model after form validation.
if field not in self.fields:
exclude.append(f.name)
# Don't perform model validation on fields that were defined
# manually on the form and excluded via the ModelForm's Meta
# class. See #12901.
elif self._meta.fields and field not in self._meta.fields:
exclude.append(f.name)
elif self._meta.exclude and field in self._meta.exclude:
exclude.append(f.name)
# Exclude fields that failed form validation. There's no need for
# the model fields to validate them as well.
elif field in self._errors:
exclude.append(f.name)
# Exclude empty fields that are not required by the form, if the
# underlying model field is required. This keeps the model field
# from raising a required error. Note: don't exclude the field from
# validation if the model field allows blanks. If it does, the blank
# value may be included in a unique check, so cannot be excluded
# from validation.
else:
form_field = self.fields[field]
field_value = self.cleaned_data.get(field)
if not f.blank and not form_field.required and field_value in form_field.empty_values:
exclude.append(f.name)
return exclude
def clean(self):
self._validate_unique = True
return self.cleaned_data
def _update_errors(self, errors):
# Override any validation error messages defined at the model level
# with those defined at the form level.
opts = self._meta
# Allow the model generated by construct_instance() to raise
# ValidationError and have them handled in the same way as others.
if hasattr(errors, 'error_dict'):
error_dict = errors.error_dict
else:
error_dict = {NON_FIELD_ERRORS: errors}
for field, messages in error_dict.items():
if (field == NON_FIELD_ERRORS and opts.error_messages and
NON_FIELD_ERRORS in opts.error_messages):
error_messages = opts.error_messages[NON_FIELD_ERRORS]
elif field in self.fields:
error_messages = self.fields[field].error_messages
else:
continue
for message in messages:
if (isinstance(message, ValidationError) and
message.code in error_messages):
message.message = error_messages[message.code]
self.add_error(None, errors)
def _post_clean(self):
opts = self._meta
exclude = self._get_validation_exclusions()
# Foreign Keys being used to represent inline relationships
# are excluded from basic field value validation. This is for two
# reasons: firstly, the value may not be supplied (#12507; the
# case of providing new values to the admin); secondly the
# object being referred to may not yet fully exist (#12749).
# However, these fields *must* be included in uniqueness checks,
# so this can't be part of _get_validation_exclusions().
for name, field in self.fields.items():
if isinstance(field, InlineForeignKeyField):
exclude.append(name)
try:
self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude)
except ValidationError as e:
self._update_errors(e)
try:
self.instance.full_clean(exclude=exclude, validate_unique=False)
except ValidationError as e:
self._update_errors(e)
# Validate uniqueness if needed.
if self._validate_unique:
self.validate_unique()
def validate_unique(self):
"""
Call the instance's validate_unique() method and update the form's
validation errors if any were raised.
"""
exclude = self._get_validation_exclusions()
try:
self.instance.validate_unique(exclude=exclude)
except ValidationError as e:
self._update_errors(e)
def _save_m2m(self):
"""
Save the many-to-many fields and generic relations for this form.
"""
cleaned_data = self.cleaned_data
exclude = self._meta.exclude
fields = self._meta.fields
opts = self.instance._meta
# Note that for historical reasons we want to include also
# private_fields here. (GenericRelation was previously a fake
# m2m field).
for f in chain(opts.many_to_many, opts.private_fields):
if not hasattr(f, 'save_form_data'):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if f.name in cleaned_data:
f.save_form_data(self.instance, cleaned_data[f.name])
def save(self, commit=True):
"""
Save this form's self.instance object if commit=True. Otherwise, add
a save_m2m() method to the form which can be called after the instance
is saved manually at a later time. Return the model instance.
"""
if self.errors:
raise ValueError(
"The %s could not be %s because the data didn't validate." % (
self.instance._meta.object_name,
'created' if self.instance._state.adding else 'changed',
)
)
if commit:
# If committing, save the instance and the m2m data immediately.
self.instance.save()
self._save_m2m()
else:
# If not committing, add a method to the form to allow deferred
# saving of m2m data.
self.save_m2m = self._save_m2m
return self.instance
save.alters_data = True
class ModelForm(BaseModelForm, metaclass=ModelFormMetaclass):
pass
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
formfield_callback=None, widgets=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
field_classes=None):
"""
Return a ModelForm containing form fields for the given model.
``fields`` is an optional list of field names. If provided, include only
the named fields in the returned fields. If omitted or '__all__', use all
fields.
``exclude`` is an optional list of field names. If provided, exclude the
named fields from the returned fields, even if they are listed in the
``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
"""
# Create the inner Meta class. FIXME: ideally, we should be able to
# construct a ModelForm without creating and passing in a temporary
# inner class.
# Build up a list of attributes that the Meta object will have.
attrs = {'model': model}
if fields is not None:
attrs['fields'] = fields
if exclude is not None:
attrs['exclude'] = exclude
if widgets is not None:
attrs['widgets'] = widgets
if localized_fields is not None:
attrs['localized_fields'] = localized_fields
if labels is not None:
attrs['labels'] = labels
if help_texts is not None:
attrs['help_texts'] = help_texts
if error_messages is not None:
attrs['error_messages'] = error_messages
if field_classes is not None:
attrs['field_classes'] = field_classes
# If parent form class already has an inner Meta, the Meta we're
# creating needs to inherit from the parent's inner meta.
bases = (form.Meta,) if hasattr(form, 'Meta') else ()
Meta = type('Meta', bases, attrs)
if formfield_callback:
Meta.formfield_callback = staticmethod(formfield_callback)
# Give this new form class a reasonable name.
class_name = model.__name__ + 'Form'
# Class attributes for the new form class.
form_class_attrs = {
'Meta': Meta,
'formfield_callback': formfield_callback
}
if (getattr(Meta, 'fields', None) is None and
getattr(Meta, 'exclude', None) is None):
raise ImproperlyConfigured(
"Calling modelform_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
# Instantiate type(form) in order to use the same metaclass as form.
return type(form)(class_name, (form,), form_class_attrs)
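

# Usage sketch (not part of Django itself): given a hypothetical ``Book``
# model with ``title`` and ``author`` fields, the factory builds a ModelForm
# class on the fly; note that ``fields`` or ``exclude`` must be given
# explicitly, otherwise ImproperlyConfigured is raised.
#
#   BookForm = modelform_factory(Book, fields=['title', 'author'])
#   form = BookForm(data={'title': 'Dune', 'author': 'Herbert'})
#   if form.is_valid():
#       book = form.save()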
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
"""
A ``FormSet`` for editing a queryset and/or adding new objects to it.
"""
model = None
# Set of fields that must be unique among forms of this set.
unique_fields = set()
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
queryset=None, *, initial=None, **kwargs):
self.queryset = queryset
self.initial_extra = initial
super().__init__(**{'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix, **kwargs})
def initial_form_count(self):
"""Return the number of forms that are required in this FormSet."""
if not self.is_bound:
return len(self.get_queryset())
return super().initial_form_count()
def _existing_object(self, pk):
if not hasattr(self, '_object_dict'):
self._object_dict = {o.pk: o for o in self.get_queryset()}
return self._object_dict.get(pk)
def _get_to_python(self, field):
"""
If the field is a related field, fetch the concrete field's (that
is, the ultimate pointed-to field's) to_python.
"""
while field.remote_field is not None:
field = field.remote_field.get_related_field()
return field.to_python
def _construct_form(self, i, **kwargs):
pk_required = i < self.initial_form_count()
if pk_required:
if self.is_bound:
pk_key = '%s-%s' % (self.add_prefix(i), self.model._meta.pk.name)
try:
pk = self.data[pk_key]
except KeyError:
# The primary key is missing. The user may have tampered
# with POST data.
pass
else:
to_python = self._get_to_python(self.model._meta.pk)
try:
pk = to_python(pk)
except ValidationError:
# The primary key exists but is an invalid value. The
# user may have tampered with POST data.
pass
else:
kwargs['instance'] = self._existing_object(pk)
else:
kwargs['instance'] = self.get_queryset()[i]
elif self.initial_extra:
# Set initial values for extra forms
try:
kwargs['initial'] = self.initial_extra[i - self.initial_form_count()]
except IndexError:
pass
form = super()._construct_form(i, **kwargs)
if pk_required:
form.fields[self.model._meta.pk.name].required = True
return form
def get_queryset(self):
if not hasattr(self, '_queryset'):
if self.queryset is not None:
qs = self.queryset
else:
qs = self.model._default_manager.get_queryset()
# If the queryset isn't already ordered we need to add an
# artificial ordering here to make sure that all formsets
# constructed from this queryset have the same form order.
if not qs.ordered:
qs = qs.order_by(self.model._meta.pk.name)
# Removed queryset limiting here. As per discussion re: #13023
# on django-dev, max_num should not prevent existing
# related objects/inlines from being displayed.
self._queryset = qs
return self._queryset
def save_new(self, form, commit=True):
"""Save and return a new model instance for the given form."""
return form.save(commit=commit)
def save_existing(self, form, instance, commit=True):
"""Save and return an existing model instance for the given form."""
return form.save(commit=commit)
def delete_existing(self, obj, commit=True):
"""Deletes an existing model instance."""
if commit:
obj.delete()
def save(self, commit=True):
"""
Save model instances for every form, adding and changing instances
as necessary, and return the list of instances.
"""
if not commit:
self.saved_forms = []
def save_m2m():
for form in self.saved_forms:
form.save_m2m()
self.save_m2m = save_m2m
return self.save_existing_objects(commit) + self.save_new_objects(commit)
save.alters_data = True
def clean(self):
self.validate_unique()
def validate_unique(self):
# Collect unique_checks and date_checks to run from all the forms.
all_unique_checks = set()
all_date_checks = set()
forms_to_delete = self.deleted_forms
valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete]
for form in valid_forms:
exclude = form._get_validation_exclusions()
unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
all_unique_checks.update(unique_checks)
all_date_checks.update(date_checks)
errors = []
# Do each of the unique checks (unique and unique_together)
for uclass, unique_check in all_unique_checks:
seen_data = set()
for form in valid_forms:
# Get the data for the set of fields that must be unique among the forms.
row_data = (
field if field in self.unique_fields else form.cleaned_data[field]
for field in unique_check if field in form.cleaned_data
)
# Reduce Model instances to their primary key values
row_data = tuple(
d._get_pk_val() if hasattr(d, '_get_pk_val')
# Prevent "unhashable type: list" errors later on.
else tuple(d) if isinstance(d, list)
else d for d in row_data
)
if row_data and None not in row_data:
# if we've already seen it then we have a uniqueness failure
if row_data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_unique_error_message(unique_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
for field in unique_check:
if field in form.cleaned_data:
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(row_data)
# iterate over each of the date checks now
for date_check in all_date_checks:
seen_data = set()
uclass, lookup, field, unique_for = date_check
for form in valid_forms:
# see if we have data for both fields
if (form.cleaned_data and form.cleaned_data[field] is not None and
form.cleaned_data[unique_for] is not None):
# if it's a date lookup we need to get the data for all the fields
if lookup == 'date':
date = form.cleaned_data[unique_for]
date_data = (date.year, date.month, date.day)
# otherwise it's just the attribute on the date/datetime
# object
else:
date_data = (getattr(form.cleaned_data[unique_for], lookup),)
data = (form.cleaned_data[field],) + date_data
# if we've already seen it then we have a uniqueness failure
if data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_date_error_message(date_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(data)
if errors:
raise ValidationError(errors)
def get_unique_error_message(self, unique_check):
if len(unique_check) == 1:
return gettext("Please correct the duplicate data for %(field)s.") % {
"field": unique_check[0],
}
else:
return gettext("Please correct the duplicate data for %(field)s, which must be unique.") % {
"field": get_text_list(unique_check, _("and")),
}
def get_date_error_message(self, date_check):
return gettext(
"Please correct the duplicate data for %(field_name)s "
"which must be unique for the %(lookup)s in %(date_field)s."
) % {
'field_name': date_check[2],
'date_field': date_check[3],
'lookup': str(date_check[1]),
}
def get_form_error(self):
return gettext("Please correct the duplicate values below.")
def save_existing_objects(self, commit=True):
self.changed_objects = []
self.deleted_objects = []
if not self.initial_forms:
return []
saved_instances = []
forms_to_delete = self.deleted_forms
for form in self.initial_forms:
obj = form.instance
# If the pk is None, it means either:
# 1. The object is an unexpected empty model, created by invalid
# POST data such as an object outside the formset's queryset.
# 2. The object was already deleted from the database.
if obj.pk is None:
continue
if form in forms_to_delete:
self.deleted_objects.append(obj)
self.delete_existing(obj, commit=commit)
elif form.has_changed():
self.changed_objects.append((obj, form.changed_data))
saved_instances.append(self.save_existing(form, obj, commit=commit))
if not commit:
self.saved_forms.append(form)
return saved_instances
def save_new_objects(self, commit=True):
self.new_objects = []
for form in self.extra_forms:
if not form.has_changed():
continue
# If someone has marked an add form for deletion, don't save the
# object.
if self.can_delete and self._should_delete_form(form):
continue
self.new_objects.append(self.save_new(form, commit=commit))
if not commit:
self.saved_forms.append(form)
return self.new_objects
def add_fields(self, form, index):
"""Add a hidden field for the object's primary key."""
from django.db.models import AutoField, OneToOneField, ForeignKey
self._pk_field = pk = self.model._meta.pk
# If a pk isn't editable, then it won't be on the form, so we need to
# add it here so we can tell which object is which when we get the
# data back. Generally, pk.editable should be false, but for some
# reason, auto_created pk fields and AutoField's editable attribute is
# True, so check for that as well.
def pk_is_not_editable(pk):
return (
(not pk.editable) or (pk.auto_created or isinstance(pk, AutoField)) or (
pk.remote_field and pk.remote_field.parent_link and
pk_is_not_editable(pk.remote_field.model._meta.pk)
)
)
if pk_is_not_editable(pk) or pk.name not in form.fields:
if form.is_bound:
# If we're adding the related instance, ignore its primary key
# as it could be an auto-generated default which isn't actually
# in the database.
pk_value = None if form.instance._state.adding else form.instance.pk
else:
try:
if index is not None:
pk_value = self.get_queryset()[index].pk
else:
pk_value = None
except IndexError:
pk_value = None
if isinstance(pk, (ForeignKey, OneToOneField)):
qs = pk.remote_field.model._default_manager.get_queryset()
else:
qs = self.model._default_manager.get_queryset()
qs = qs.using(form.instance._state.db)
if form._meta.widgets:
widget = form._meta.widgets.get(self._pk_field.name, HiddenInput)
else:
widget = HiddenInput
form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget)
super().add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
formset=BaseModelFormSet, extra=1, can_delete=False,
can_order=False, max_num=None, fields=None, exclude=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False, field_classes=None):
"""Return a FormSet class for the given Django model class."""
meta = getattr(form, 'Meta', None)
if (getattr(meta, 'fields', fields) is None and
getattr(meta, 'exclude', exclude) is None):
raise ImproperlyConfigured(
"Calling modelformset_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
form = modelform_factory(model, form=form, fields=fields, exclude=exclude,
formfield_callback=formfield_callback,
widgets=widgets, localized_fields=localized_fields,
labels=labels, help_texts=help_texts,
error_messages=error_messages, field_classes=field_classes)
FormSet = formset_factory(form, formset, extra=extra, min_num=min_num, max_num=max_num,
can_order=can_order, can_delete=can_delete,
validate_min=validate_min, validate_max=validate_max)
FormSet.model = model
return FormSet
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
"""A formset for child objects related to a parent."""
def __init__(self, data=None, files=None, instance=None,
save_as_new=False, prefix=None, queryset=None, **kwargs):
if instance is None:
self.instance = self.fk.remote_field.model()
else:
self.instance = instance
self.save_as_new = save_as_new
if queryset is None:
queryset = self.model._default_manager
if self.instance.pk is not None:
qs = queryset.filter(**{self.fk.name: self.instance})
else:
qs = queryset.none()
self.unique_fields = {self.fk.name}
super().__init__(data, files, prefix=prefix, queryset=qs, **kwargs)
# Add the generated field to form._meta.fields if it's defined to make
# sure validation isn't skipped on that field.
if self.form._meta.fields and self.fk.name not in self.form._meta.fields:
if isinstance(self.form._meta.fields, tuple):
self.form._meta.fields = list(self.form._meta.fields)
self.form._meta.fields.append(self.fk.name)
def initial_form_count(self):
if self.save_as_new:
return 0
return super().initial_form_count()
def _construct_form(self, i, **kwargs):
form = super()._construct_form(i, **kwargs)
if self.save_as_new:
mutable = getattr(form.data, '_mutable', None)
# Allow modifying an immutable QueryDict.
if mutable is not None:
form.data._mutable = True
# Remove the primary key from the form's data, we are only
# creating new instances
form.data[form.add_prefix(self._pk_field.name)] = None
# Remove the foreign key from the form's data
form.data[form.add_prefix(self.fk.name)] = None
if mutable is not None:
form.data._mutable = mutable
# Set the fk value here so that the form can do its validation.
fk_value = self.instance.pk
if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
fk_value = getattr(self.instance, self.fk.remote_field.field_name)
fk_value = getattr(fk_value, 'pk', fk_value)
setattr(form.instance, self.fk.get_attname(), fk_value)
return form
@classmethod
def get_default_prefix(cls):
return cls.fk.remote_field.get_accessor_name(model=cls.model).replace('+', '')
def save_new(self, form, commit=True):
# Ensure the latest copy of the related instance is present on each
# form (it may have been saved after the formset was originally
# instantiated).
setattr(form.instance, self.fk.name, self.instance)
return super().save_new(form, commit=commit)
def add_fields(self, form, index):
super().add_fields(form, index)
if self._pk_field == self.fk:
name = self._pk_field.name
kwargs = {'pk_field': True}
else:
# The foreign key field might not be on the form, so we poke at the
# Model field to get the label, since we need that for error messages.
name = self.fk.name
kwargs = {
'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
}
# The InlineForeignKeyField assumes that the foreign key relation is
# based on the parent model's pk. If this isn't the case, set to_field
# to correctly resolve the initial form value.
if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
kwargs['to_field'] = self.fk.remote_field.field_name
# If we're adding a new object, ignore a parent's auto-generated key
# as it will be regenerated on the save request.
if self.instance._state.adding:
if kwargs.get('to_field') is not None:
to_field = self.instance._meta.get_field(kwargs['to_field'])
else:
to_field = self.instance._meta.pk
if to_field.has_default():
setattr(self.instance, to_field.attname, None)
form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)
def get_unique_error_message(self, unique_check):
unique_check = [field for field in unique_check if field != self.fk.name]
return super().get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
"""
Find and return the ForeignKey from model to parent if there is one
(return None if can_fail is True and no such field exists). If fk_name is
provided, assume it is the name of the ForeignKey field. Unless can_fail is
True, raise an exception if there isn't a ForeignKey from model to
parent_model.
"""
# avoid circular import
from django.db.models import ForeignKey
opts = model._meta
if fk_name:
fks_to_parent = [f for f in opts.fields if f.name == fk_name]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
if not isinstance(fk, ForeignKey) or \
(fk.remote_field.model != parent_model and
fk.remote_field.model not in parent_model._meta.get_parent_list()):
raise ValueError(
"fk_name '%s' is not a ForeignKey to '%s'." % (fk_name, parent_model._meta.label)
)
elif not fks_to_parent:
raise ValueError(
"'%s' has no field named '%s'." % (model._meta.label, fk_name)
)
else:
# Try to discover what the ForeignKey from model to parent_model is
fks_to_parent = [
f for f in opts.fields
if isinstance(f, ForeignKey) and (
f.remote_field.model == parent_model or
f.remote_field.model in parent_model._meta.get_parent_list()
)
]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
elif not fks_to_parent:
if can_fail:
return
raise ValueError(
"'%s' has no ForeignKey to '%s'." % (
model._meta.label,
parent_model._meta.label,
)
)
else:
raise ValueError(
"'%s' has more than one ForeignKey to '%s'." % (
model._meta.label,
parent_model._meta.label,
)
)
return fk
def inlineformset_factory(parent_model, model, form=ModelForm,
formset=BaseInlineFormSet, fk_name=None,
fields=None, exclude=None, extra=3, can_order=False,
can_delete=True, max_num=None, formfield_callback=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False, field_classes=None):
"""
Return an ``InlineFormSet`` for the given kwargs.
``fk_name`` must be provided if ``model`` has more than one ``ForeignKey``
to ``parent_model``.
"""
fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
# enforce a max_num=1 when the foreign key to the parent model is unique.
if fk.unique:
max_num = 1
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': extra,
'can_delete': can_delete,
'can_order': can_order,
'fields': fields,
'exclude': exclude,
'min_num': min_num,
'max_num': max_num,
'widgets': widgets,
'validate_min': validate_min,
'validate_max': validate_max,
'localized_fields': localized_fields,
'labels': labels,
'help_texts': help_texts,
'error_messages': error_messages,
'field_classes': field_classes,
}
FormSet = modelformset_factory(model, **kwargs)
FormSet.fk = fk
return FormSet
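

# Usage sketch (not part of Django itself): with hypothetical ``Author`` and
# ``Book`` models where ``Book`` has a ForeignKey to ``Author``, the inline
# formset edits an author's books together; ``fk_name`` is only needed when
# more than one ForeignKey points at the parent.
#
#   BookFormSet = inlineformset_factory(Author, Book, fields=['title'], extra=1)
#   formset = BookFormSet(instance=some_author)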
# Fields #####################################################################
class InlineForeignKeyField(Field):
"""
A basic integer field that deals with validating the given value to a
given parent instance in an inline.
"""
widget = HiddenInput
default_error_messages = {
'invalid_choice': _('The inline value did not match the parent instance.'),
}
def __init__(self, parent_instance, *args, pk_field=False, to_field=None, **kwargs):
self.parent_instance = parent_instance
self.pk_field = pk_field
self.to_field = to_field
if self.parent_instance is not None:
if self.to_field:
kwargs["initial"] = getattr(self.parent_instance, self.to_field)
else:
kwargs["initial"] = self.parent_instance.pk
kwargs["required"] = False
super().__init__(*args, **kwargs)
def clean(self, value):
if value in self.empty_values:
if self.pk_field:
return None
# if there is no value act as we did before.
return self.parent_instance
# ensure the we compare the values as equal types.
if self.to_field:
orig = getattr(self.parent_instance, self.to_field)
else:
orig = self.parent_instance.pk
if str(value) != str(orig):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return self.parent_instance
def has_changed(self, initial, data):
return False
class ModelChoiceIterator:
def __init__(self, field):
self.field = field
self.queryset = field.queryset
def __iter__(self):
if self.field.empty_label is not None:
yield ("", self.field.empty_label)
queryset = self.queryset
# Can't use iterator() when queryset uses prefetch_related()
if not queryset._prefetch_related_lookups:
queryset = queryset.iterator()
for obj in queryset:
yield self.choice(obj)
def __len__(self):
# count() adds a query but uses less memory since the QuerySet results
# won't be cached. In most cases, the choices will only be iterated on,
# and __len__() won't be called.
return self.queryset.count() + (1 if self.field.empty_label is not None else 0)
def __bool__(self):
return self.field.empty_label is not None or self.queryset.exists()
def choice(self, obj):
return (self.field.prepare_value(obj), self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
"""A ChoiceField whose choices are a model QuerySet."""
# This class is a subclass of ChoiceField for purity, but it doesn't
# actually use any of ChoiceField's implementation.
default_error_messages = {
'invalid_choice': _('Select a valid choice. That choice is not one of'
' the available choices.'),
}
iterator = ModelChoiceIterator
def __init__(self, queryset, *, empty_label="---------",
required=True, widget=None, label=None, initial=None,
help_text='', to_field_name=None, limit_choices_to=None,
**kwargs):
if required and (initial is not None):
self.empty_label = None
else:
self.empty_label = empty_label
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
Field.__init__(
self, required=required, widget=widget, label=label,
initial=initial, help_text=help_text, **kwargs
)
self.queryset = queryset
self.limit_choices_to = limit_choices_to # limit the queryset later.
self.to_field_name = to_field_name
def get_limit_choices_to(self):
"""
Return ``limit_choices_to`` for this form field.
If it is a callable, invoke it and return the result.
"""
if callable(self.limit_choices_to):
return self.limit_choices_to()
return self.limit_choices_to
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
# Need to force a new ModelChoiceIterator to be created, bug #11183
if self.queryset is not None:
result.queryset = self.queryset.all()
return result
def _get_queryset(self):
return self._queryset
def _set_queryset(self, queryset):
self._queryset = None if queryset is None else queryset.all()
self.widget.choices = self.choices
queryset = property(_get_queryset, _set_queryset)
# this method will be used to create object labels by the QuerySetIterator.
# Override it to customize the label.
def label_from_instance(self, obj):
"""
Convert objects into strings and generate the labels for the choices
presented by this object. Subclasses can override this method to
customize the display of the choices.
"""
return str(obj)
def _get_choices(self):
# If self._choices is set, then somebody must have manually set
# the property self.choices. In this case, just return self._choices.
if hasattr(self, '_choices'):
return self._choices
# Otherwise, execute the QuerySet in self.queryset to determine the
# choices dynamically. Return a fresh ModelChoiceIterator that has not been
# consumed. Note that we're instantiating a new ModelChoiceIterator *each*
# time _get_choices() is called (and, thus, each time self.choices is
# accessed) so that we can ensure the QuerySet has not been consumed. This
# construct might look complicated but it allows for lazy evaluation of
# the queryset.
return self.iterator(self)
choices = property(_get_choices, ChoiceField._set_choices)
def prepare_value(self, value):
if hasattr(value, '_meta'):
if self.to_field_name:
return value.serializable_value(self.to_field_name)
else:
return value.pk
return super().prepare_value(value)
def to_python(self, value):
if value in self.empty_values:
return None
try:
key = self.to_field_name or 'pk'
value = self.queryset.get(**{key: value})
except (ValueError, TypeError, self.queryset.model.DoesNotExist):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return value
def validate(self, value):
return Field.validate(self, value)
def has_changed(self, initial, data):
if self.disabled:
return False
initial_value = initial if initial is not None else ''
data_value = data if data is not None else ''
return str(self.prepare_value(initial_value)) != str(data_value)
class ModelMultipleChoiceField(ModelChoiceField):
"""A MultipleChoiceField whose choices are a model QuerySet."""
widget = SelectMultiple
hidden_widget = MultipleHiddenInput
default_error_messages = {
'list': _('Enter a list of values.'),
'invalid_choice': _('Select a valid choice. %(value)s is not one of the'
' available choices.'),
'invalid_pk_value': _('"%(pk)s" is not a valid value.')
}
def __init__(self, queryset, **kwargs):
super().__init__(queryset, empty_label=None, **kwargs)
def to_python(self, value):
if not value:
return []
return list(self._check_values(value))
def clean(self, value):
value = self.prepare_value(value)
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
elif not self.required and not value:
return self.queryset.none()
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['list'], code='list')
qs = self._check_values(value)
# Since this overrides the inherited ModelChoiceField.clean
# we run custom validators here
self.run_validators(value)
return qs
def _check_values(self, value):
"""
Given a list of possible PK values, return a QuerySet of the
corresponding objects. Raise a ValidationError if a given value is
invalid (not a valid PK, not in the queryset, etc.)
"""
key = self.to_field_name or 'pk'
# deduplicate given values to avoid creating many querysets or
# requiring the database backend deduplicate efficiently.
try:
value = frozenset(value)
except TypeError:
# list of lists isn't hashable, for example
raise ValidationError(
self.error_messages['list'],
code='list',
)
for pk in value:
try:
self.queryset.filter(**{key: pk})
except (ValueError, TypeError):
raise ValidationError(
self.error_messages['invalid_pk_value'],
code='invalid_pk_value',
params={'pk': pk},
)
qs = self.queryset.filter(**{'%s__in' % key: value})
pks = {str(getattr(o, key)) for o in qs}
for val in value:
if str(val) not in pks:
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
return qs
def prepare_value(self, value):
if (hasattr(value, '__iter__') and
not isinstance(value, str) and
not hasattr(value, '_meta')):
prepare_value = super().prepare_value
return [prepare_value(v) for v in value]
return super().prepare_value(value)
def has_changed(self, initial, data):
if self.disabled:
return False
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = {str(value) for value in self.prepare_value(initial)}
data_set = {str(value) for value in data}
return data_set != initial_set
def modelform_defines_fields(form_class):
return hasattr(form_class, '_meta') and (
form_class._meta.fields is not None or
form_class._meta.exclude is not None
)
|
import torch
import torch.nn as nn
def focal_loss(input, target, alpha=0.25, gamma=2.):
'''
Args:
input: prediction, 'batch x c x h x w'
target: ground truth, 'batch x c x h x w'
alpha: hyper param, default in 0.25
gamma: hyper param, default in 2.0
Reference: Focal Loss for Dense Object Detection, ICCV'17
'''
pos_inds = target.eq(1).float()
neg_inds = target.lt(1).float()
loss = 0
pos_loss = torch.log(input) * torch.pow(1 - input, gamma) * pos_inds * alpha
neg_loss = torch.log(1 - input) * torch.pow(input, gamma) * neg_inds * (1 - alpha)
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss.mean()
def focal_loss_cornernet(input, target, gamma=2.):
'''
Args:
input: prediction, 'batch x c x h x w'
target: ground truth, 'batch x c x h x w'
gamma: hyper param, default in 2.0
Reference: Cornernet: Detecting Objects as Paired Keypoints, ECCV'18
'''
pos_inds = target.eq(1).float()
neg_inds = target.lt(1).float()
neg_weights = torch.pow(1 - target, 4)
loss = 0
pos_loss = torch.log(input) * torch.pow(1 - input, gamma) * pos_inds
neg_loss = torch.log(1 - input) * torch.pow(input, gamma) * neg_inds * neg_weights
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss.mean()
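

# Minimal usage sketch (not from the original module): both losses expect
# `input` to already be a probability map (e.g. sigmoid of logits) and
# `target` to be a heatmap in [0, 1] with 1 at positive locations.
if __name__ == '__main__':
    torch.manual_seed(0)
    logits = torch.randn(2, 3, 8, 8)
    pred = torch.sigmoid(logits).clamp(1e-6, 1 - 1e-6)  # avoid log(0)
    target = torch.zeros(2, 3, 8, 8)
    target[:, :, 4, 4] = 1.0  # one positive peak per channel
    print('focal loss:', focal_loss(pred, target).item())
    print('cornernet focal loss:', focal_loss_cornernet(pred, target).item())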
|
from memory.space import Bank, START_ADDRESS_SNES, Write
import instruction.asm as asm
import instruction.f0 as f0
import args
import data.event_bit as event_bit
from constants.gates import character_checks
import constants.objectives.condition_bits as condition_bits
import menus.pregame_track_scroll_area as scroll_area
# 0x0002 is not 0xffff after a battle (bug? c2d450), use 0x1202 instead
constant_ffff = 0x1202 # always contains value 0xffff
class Checks(scroll_area.ScrollArea):
MENU_NUMBER = 13
def __init__(self):
self.check_bits = {}
for name_bit in condition_bits.check_bit:
self.check_bits[name_bit.name] = name_bit.bit
self.check_bits["Auction1"] = event_bit.AUCTION_BOUGHT_ESPER1
self.check_bits["Auction2"] = event_bit.AUCTION_BOUGHT_ESPER2
if args.character_gating:
self.character_gating_init()
else:
self.open_world_init()
super().__init__()
def line_color_function(self, address, bit):
src = [
asm.LDA(address, asm.ABS),
asm.AND(2 ** bit, asm.IMM8),
asm.BEQ("FALSE"),
asm.JMP(f0.set_gray_text_color, asm.ABS),
"FALSE",
asm.JMP(f0.set_user_text_color, asm.ABS),
]
space = Write(Bank.F0, src, f"menu checks line color function {hex(address)} {hex(bit)}")
return space.start_address
def open_world_init(self):
checks = []
for group in character_checks.values():
checks += group
checks = sorted(checks)
self.lines = []
self.line_skip_bits = []
for check in checks:
check_address = event_bit.address(self.check_bits[check])
check_bit = event_bit.bit(self.check_bits[check])
color_function = self.line_color_function(check_address, check_bit)
self.lines.append(scroll_area.Line(check, color_function))
self.line_skip_bits.append((constant_ffff, 0x01)) # never skip
def character_gating_init(self):
from data.characters import Characters
self.lines = []
self.line_skip_bits = []
for character, checks in character_checks.items():
if not character:
character = "Open"
character_address = constant_ffff # always 0xffff
character_bit = 0x01 # any bit
else:
character_id = Characters.DEFAULT_NAME.index(character.upper())
character_event_bit = event_bit.character_recruited(character_id)
character_address = event_bit.address(character_event_bit)
character_bit = event_bit.bit(character_event_bit)
self.lines.append(scroll_area.Line(character, f0.set_blue_text_color))
self.line_skip_bits.append((character_address, character_bit))
for check in checks:
check_address = event_bit.address(self.check_bits[check])
check_bit = event_bit.bit(self.check_bits[check])
color_function = self.line_color_function(check_address, check_bit)
self.lines.append(scroll_area.Line(" " + check, color_function))
self.line_skip_bits.append((character_address, character_bit))
self.lines.append(scroll_area.Line("", f0.set_user_text_color))
self.line_skip_bits.append((character_address, character_bit))
del self.lines[-1]
del self.line_skip_bits[-1]
def initialize_mod(self):
src = []
for address_bit in self.line_skip_bits:
src += [
address_bit[0].to_bytes(2, "little"),
2 ** address_bit[1],
]
space = Write(Bank.F0, src, "menu checks post invoke byte bit table")
byte_bit_table = space.start_address
# write out which indices of self.lines to display
# pad end of scroll area with empty lines (represented by index 0xff)
# NOTE: using 256 bytes of ram starting at 0x7e2000
src = [
asm.TDC(),
asm.LDY(0x0000, asm.IMM16), # y = self.lines index (character/check line index)
asm.STY(0xe5, asm.DIR), # e5 = menu line index, e6 = byte_bit_table index
"LINE_LOOP_START",
asm.LDA(0xe6, asm.DIR), # a = index of event byte address in byte_bit_table
asm.A16(),
asm.TAX(), # x = index of event byte address in byte_bit_table
asm.LDA(START_ADDRESS_SNES + byte_bit_table, asm.LNG_X), # a = event byte address
asm.TAX(), # x = event byte address
asm.TDC(),
asm.A8(),
asm.LDA(0x00, asm.DIR_X), # a = event byte value
asm.INC(0xe6, asm.DIR),
asm.INC(0xe6, asm.DIR), # e6 = byte_bit_table index of bit (2 byte address, 1 byte event bit)
asm.LDX(0x00, asm.DIR), # clear x register
asm.XY8(),
asm.LDX(0xe6, asm.DIR), # x = byte_bit_table index of bit
asm.XY16(),
asm.AND(START_ADDRESS_SNES + byte_bit_table, asm.LNG_X), # is event bit set in event byte?
asm.BEQ("SKIP_LINE"), # branch if not
asm.LDA(0xe5, asm.DIR), # a = menu line index
asm.TYX(), # x = self.lines index to display at this menu index
asm.STA(0x7e2000, asm.LNG_X),
asm.INY(), # next self.lines index
"SKIP_LINE",
asm.INC(0xe5, asm.DIR), # next menu line index
asm.INC(0xe6, asm.DIR), # next event byte
asm.LDA(0xe5, asm.DIR),
asm.CMP(len(self.lines), asm.IMM8), # finished checking every line in self.lines?
asm.BLT("LINE_LOOP_START"), # branch if not
# fill the rest of the menu with empty lines
asm.LDA(0xff, asm.IMM8),
asm.TYX(), # x = index after last self.lines index written
"PADDING",
asm.CPX(len(self.lines), asm.IMM16),
asm.BGE("RETURN"),
asm.STA(0x7e2000, asm.LNG_X),
asm.INX(),
asm.BRA("PADDING"),
"RETURN",
asm.RTL(),
]
space = Write(Bank.F0, src, "menu checks initialize store line indices to draw")
self.initialize = space.start_address
def initialize_line_mod(self):
src = [
asm.LDX(0x0003, asm.IMM16), # x = 3 (x position to write at)
asm.JSL(scroll_area.set_line_x_pos),
asm.TDC(),
asm.LDA(0xe5, asm.DIR), # menu line index
asm.TAX(),
asm.LDA(0x7e2000, asm.LNG_X), # look up line to write at current menu line
asm.TAY(), # y = character/check line index
asm.CMP(0xff, asm.IMM8), # last line or hidden?
asm.BEQ("AFTER_SET_COLOR"), # branch if so
asm.JSR(self.set_line_color, asm.ABS),
"AFTER_SET_COLOR",
asm.RTL(),
]
space = Write(Bank.F0, src, "menu checks initialize line set line color")
self.initialize_line = space.start_address
|
def initialize():
    exit = False
    while not exit:
        print("⚙️ Selecciona el tipo de interfaz:")
        print("(0) -> Salir")
        print("(1) -> CLI")
        print("(2) -> GUI")
        n = 100
        try:
            n = int(input())
        except ValueError:
            print("⛔ No has introducido un número.")
        if n == 0:
            exit = True
        elif n == 1:
            import biblioteca.MenuCLI as cli
            cli.menu()
            exit = True
        elif n == 2:
            import biblioteca.MenuGUI as gui
            gui.MainApp.init()
            exit = True
        else:
            print("❓ Introduce 0, 1 o 2") |
from time import perf_counter as perC
from typing import Callable

counted = 0
"""Seconds taken by the most recently completed measured call."""


def funcTimer(functionObject: Callable) -> Callable:
    """
    Decorator that measures how long the wrapped call takes from start to completion.
    The measured number of seconds is stored in the module-level variable counted.
    """
    def retFunc(*args, **kwargs):
        global counted
        st = perC()
        ret = functionObject(*args, **kwargs)
        en = perC()
        counted = en - st
return ret
return retFunc |
from django.contrib import admin
from .models import Banner, Services, Video, Testimonial
admin.site.register(Banner)
admin.site.register(Services)
admin.site.register(Video)
admin.site.register(Testimonial)
|
import numpy as np
class Activation:
"""
Activation function
Meant to be calculated as follows:
f(W * X.T + b)
Where:
- f - non-linear activation function
- X is m (batch,examples) by n (prev units)
- W is m (units) by n (prev units)
- b is intercept term (bias) - vector of n units
"""
@staticmethod
def activate(z):
        raise NotImplementedError()
@staticmethod
def differentiate(dA, a):
        raise NotImplementedError()
class Sigmoid(Activation):
@staticmethod
def activate(z):
"""
Parameters
----------
z - linear product (W @ A + b)
Returns
-------
A - [batch * L-units] Activation output from the current layer
"""
return 1 / (1 + np.exp(-z))
@staticmethod
def differentiate(dA, a):
return dA * a * (1 - a)
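

# Quick self-check (a sketch, not part of the original module): run a sigmoid
# forward pass on a small batch and verify differentiate() against the
# analytic derivative sigma(z) * (1 - sigma(z)).
if __name__ == '__main__':
    z = np.array([[-2.0, 0.0, 3.0]])
    a = Sigmoid.activate(z)
    dA = np.ones_like(a)               # pretend the upstream gradient is all ones
    dZ = Sigmoid.differentiate(dA, a)  # equals a * (1 - a) in that case
    assert np.allclose(dZ, a * (1 - a))
    print(a, dZ)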
|
""" common automol parameters
"""
import inspect
import itertools
class ReactionClass:
""" Names of supported reaction classes
"""
TRIVIAL = 'trivial'
# Unimolecular reactions
HYDROGEN_MIGRATION = 'hydrogen migration'
BETA_SCISSION = 'beta scission'
RING_FORM_SCISSION = 'ring forming scission'
ELIMINATION = 'elimination'
# Bimolecular reactions
HYDROGEN_ABSTRACTION = 'hydrogen abstraction'
ADDITION = 'addition'
INSERTION = 'insertion'
SUBSTITUTION = 'substitution'
REVERSE_REACTION_DCT = {
ReactionClass.HYDROGEN_MIGRATION: ReactionClass.HYDROGEN_MIGRATION,
ReactionClass.HYDROGEN_ABSTRACTION: ReactionClass.HYDROGEN_ABSTRACTION,
ReactionClass.ADDITION: ReactionClass.BETA_SCISSION,
ReactionClass.BETA_SCISSION: ReactionClass.ADDITION,
ReactionClass.ELIMINATION: ReactionClass.INSERTION,
ReactionClass.INSERTION: ReactionClass.ELIMINATION,
# ReactionClass.SUBSTITUTION: ?
}
def is_reaction_class(rxn_class):
""" Check if class in list of REACTION CLASS
"""
return rxn_class in _values(ReactionClass)
def reverse_reaction_class(rxn_class):
""" determine the reverse of a reaction class
"""
return REVERSE_REACTION_DCT.get(rxn_class, None)
def _values(cls):
""" list the values of a parameter class
"""
assert inspect.isclass(cls)
vals = tuple(val for val in _public_attributes(cls)
if not inspect.isclass(val))
return vals
def all_values(cls):
""" recursively list the values of a parameter class tree
"""
assert inspect.isclass(cls)
vals = tuple(itertools.chain(*(
[val] if not inspect.isclass(val) else all_values(val)
for val in _public_attributes(cls))))
return vals
def _public_attributes(cls):
return tuple(val for name, val in
inspect.getmembers(cls, lambda x: not inspect.isroutine(x))
if not name.startswith('_') and not inspect.isfunction(val))
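

# Small usage sketch (not part of the original module).
if __name__ == '__main__':
    assert is_reaction_class(ReactionClass.ADDITION)
    # addition and beta scission are declared as each other's reverse above
    assert reverse_reaction_class(ReactionClass.ADDITION) == ReactionClass.BETA_SCISSION
    print(all_values(ReactionClass))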
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Median Maintenance """
import math
def _get_parent(key: int):
if key == 0:
return 0
else:
return (key + 1) // 2 - 1
def _get_children(key: int):
return 2 * key + 1, 2 * key + 2
class MaxHeap:
def __init__(self):
self._arr = []
def insert(self, elem: int):
self._arr.append(elem)
self._bubble_up(len(self._arr) - 1)
    def extract_max(self) -> int:
        root = self._arr[0]
        last = self._arr.pop()
        if self._arr:
            # move the last leaf to the root and restore the heap property
            self._arr[0] = last
            self._bubble_down(0)
        return root
def get_max(self):
if len(self._arr) > 0:
return self._arr[0]
else:
return -math.inf
def get_size(self):
return len(self._arr)
def _bubble_up(self, key: int):
parent = _get_parent(key)
elem = self._arr[key]
if elem > self._arr[parent]:
self._arr[key] = self._arr[parent]
self._arr[parent] = elem
self._bubble_up(parent)
def _bubble_down(self, key: int):
children = _get_children(key)
num_elems = len(self._arr)
elem = self._arr[key]
if children[0] >= num_elems:
return
elif children[1] >= num_elems:
if elem > self._arr[children[0]]:
self._arr[key] = self._arr[children[0]]
self._arr[children[0]] = elem
self._bubble_down(children[0])
else:
c0 = self._arr[children[0]]
c1 = self._arr[children[1]]
if c0 > c1:
if elem < c0:
self._arr[key] = c0
self._arr[children[0]] = elem
self._bubble_down(children[0])
else:
if elem < c1:
self._arr[key] = c1
self._arr[children[1]] = elem
self._bubble_down(children[1])
class MinHeap:
def __init__(self):
self._arr = []
def insert(self, elem: int):
self._arr.append(elem)
self._bubble_up(len(self._arr) - 1)
def get_min(self):
if len(self._arr) > 0:
return self._arr[0]
else:
return math.inf
    def extract_min(self) -> int:
        root = self._arr[0]
        last = self._arr.pop()
        if self._arr:
            # move the last leaf to the root and restore the heap property
            self._arr[0] = last
            self._bubble_down(0)
        return root
def get_size(self):
return len(self._arr)
def _bubble_up(self, key: int):
parent = _get_parent(key)
elem = self._arr[key]
if elem < self._arr[parent]:
self._arr[key] = self._arr[parent]
self._arr[parent] = elem
self._bubble_up(parent)
def _bubble_down(self, key: int):
children = _get_children(key)
num_elems = len(self._arr)
elem = self._arr[key]
if children[0] >= num_elems:
return
elif children[1] >= num_elems:
if elem < self._arr[children[0]]:
self._arr[key] = self._arr[children[0]]
self._arr[children[0]] = elem
self._bubble_down(children[0])
else:
c0 = self._arr[children[0]]
c1 = self._arr[children[1]]
if c0 < c1:
if elem > c0:
self._arr[key] = c0
self._arr[children[0]] = elem
self._bubble_down(children[0])
else:
if elem > c1:
self._arr[key] = c1
self._arr[children[1]] = elem
self._bubble_down(children[1])
def main(numbers: list):
high_heap = MinHeap() # stores higher half of elements
low_heap = MaxHeap() # stores lower half of elements
median_sum = 0
for n in numbers:
if n <= low_heap.get_max():
low_heap.insert(n)
else:
high_heap.insert(n)
if low_heap.get_size() > high_heap.get_size() + 1:
high_heap.insert(low_heap.extract_max())
elif low_heap.get_size() + 1 < high_heap.get_size():
low_heap.insert(high_heap.extract_min())
else:
pass
if high_heap.get_size() > low_heap.get_size():
median = high_heap.get_min()
elif high_heap.get_size() < low_heap.get_size():
median = low_heap.get_max()
else:
median = low_heap.get_max()
median_sum += median
return median_sum
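
# For example, with the usual "lower median for even k" convention used above:
# main([2, 1, 3]) returns 5, since the running medians are 2, 1, 2.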
if __name__ == "__main__":
fname = "input.txt"
with open(fname, mode="r") as fin:
data = [int(x.strip()) for x in fin.readlines() if x.strip()]
result = main(data)
print("result:", result % 10000)
|
import socket
def threaded_method(self):
sock = socket.socket()
sock.connect(('xkcd.com', 80))
request = 'GET {} HTTP/1.0\r\n\r\n'.format('/353/')
sock.send(request.encode('ascii'))
response = b''
chunk = sock.recv(4096)
while chunk:
response += chunk
chunk = sock.recv(4096)
print(response)
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import jinja2.sandbox
class SandboxedEnvironment(jinja2.sandbox.SandboxedEnvironment):
"""SandboxedEnvironment for Airflow task templates."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.filters.update(FILTERS)
def is_safe_attribute(self, obj, attr, value):
"""
Allow access to ``_`` prefix vars (but not ``__``).
Unlike the stock SandboxedEnvironment, we allow access to "private" attributes (ones starting with
``_``) whilst still blocking internal or truely private attributes (``__`` prefixed ones).
"""
return not jinja2.sandbox.is_internal_attribute(obj, attr)
def ds_filter(value):
return value.strftime('%Y-%m-%d')
def ds_nodash_filter(value):
return value.strftime('%Y%m%d')
def ts_filter(value):
return value.isoformat()
def ts_nodash_filter(value):
return value.strftime('%Y%m%dT%H%M%S')
def ts_nodash_with_tz_filter(value):
return value.isoformat().replace('-', '').replace(':', '')
FILTERS = {
'ds': ds_filter,
'ds_nodash': ds_nodash_filter,
'ts': ts_filter,
'ts_nodash': ts_nodash_filter,
'ts_nodash_with_tz': ts_nodash_with_tz_filter,
}
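

# Usage sketch (not from the original module): render a template string with
# the sandboxed environment and the date filters registered above.
if __name__ == '__main__':
    from datetime import datetime

    env = SandboxedEnvironment()
    tmpl = env.from_string('run date: {{ execution_date | ds }}')
    print(tmpl.render(execution_date=datetime(2021, 1, 2)))  # run date: 2021-01-02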
|
from django.contrib import admin
from socialregistration.contrib.sapo.models import SapoProfile
admin.site.register(SapoProfile)
|
import requests
import json
def test_hello(url):
response = requests.get(url)
assert response.status_code == 200
assert response.text == "Hello, World!" |
import subprocess as sp
import soundfile as sf
import tempfile as tmp
from itertools import chain
import warnings
import re
import stempeg
def check_available_aac_encoders():
"""Returns the available AAC encoders
Returns
----------
codecs : list(str)
List of available encoder codecs
"""
cmd = [
'ffmpeg',
'-v', 'error',
'-codecs'
]
output = sp.check_output(cmd)
aac_codecs = [
x for x in
output.splitlines() if "AAC (Advanced Audio Coding)" in str(x)
][0]
hay = aac_codecs.decode('ascii')
match = re.findall(r'\(encoders: ([^\)]*) \)', hay)
if match:
return match[0].split(" ")
else:
return None
def write_stems(
audio,
filename,
rate=44100,
bitrate=256000,
codec=None,
ffmpeg_params=None
):
"""Write stems from numpy Tensor
Parameters
----------
audio : array_like
The tensor of Matrix of stems. The data shape is formatted as
:code:`stems x channels x samples`.
filename : str
Output file_name of the stems file
rate : int
Output samplerate. Defaults to 44100 Hz.
bitrate : int
AAC Bitrate in Bits per second. Defaults to 256 Kbit/s
codec : str
AAC codec used. Defaults to `None` which automatically selects
either `libfdk_aac` or `aac` in that order, determined by availability.
ffmpeg_params : list(str)
List of additional ffmpeg parameters
Notes
-----
Output is written as 16bit/44.1 kHz
"""
if int(stempeg.ffmpeg_version()[0]) < 3:
warnings.warn(
"Writing STEMS with FFMPEG version < 3 is unsupported", UserWarning
)
if codec is None:
avail = check_available_aac_encoders()
if avail is not None:
if 'libfdk_aac' in avail:
codec = 'libfdk_aac'
else:
codec = 'aac'
                warnings.warn("For better quality, please install libfdk_aac")
else:
codec = 'aac'
            warnings.warn("For better quality, please install libfdk_aac")
tmps = [
tmp.NamedTemporaryFile(delete=False, suffix='.wav')
for t in range(audio.shape[0])
]
if audio.shape[1] % 1024 != 0:
        warnings.warn(
            "Number of samples does not divide by 1024, be aware that "
            "the AAC encoder adds silence to the input signal"
)
for k in range(audio.shape[0]):
sf.write(tmps[k].name, audio[k], rate)
cmd = (
[
'ffmpeg', '-y',
"-f", 's%dle' % (16),
"-acodec", 'pcm_s%dle' % (16),
'-ar', "%d" % rate,
'-ac', "%d" % 2
] +
list(chain.from_iterable(
[['-i', i.name] for i in tmps]
)) +
list(chain.from_iterable(
[['-map', str(k)] for k, _ in enumerate(tmps)]
)) +
[
'-vn',
'-acodec', codec,
'-ar', "%d" % rate,
'-strict', '-2',
'-loglevel', 'error'
] +
(['-ab', str(bitrate)] if (bitrate is not None) else []) +
(ffmpeg_params if ffmpeg_params else []) +
[filename]
)
sp.call(cmd)
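

# Usage sketch (not part of the original module): write four silent stereo
# stems. Assumes ffmpeg with an AAC encoder is available on PATH, and that
# `audio` is laid out as stems x samples x channels, the layout consumed by
# sf.write() above.
if __name__ == '__main__':
    import numpy as np

    silent_stems = np.zeros((4, 1024 * 128, 2))  # ~3 s of silence per stem
    write_stems(silent_stems, 'silence.stem.mp4')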
|
"""
Numpy stages.
The examples in this section assume
>>> import numpy
"""
import types
try:
from ._src import *
from ._filt import *
from ._snk import *
except ValueError:
from _src import *
from _filt import *
from _snk import *
__all__ = []
for m in [_src, _filt, _snk]:
for s in dir(m):
if s[0] == '_':
continue
try:
if eval('not isinstance(m.%s, types.ModuleType)' % s):
__all__.append(s)
except NameError as e:
pass
|
import os
import math
from PIL import Image
import requests
from io import BytesIO
from torchvision import transforms, utils
class DataLoader():
def __init__(self):
pass
def load(self, url, size):
try:
response = requests.get(url)
img = Image.open(BytesIO(response.content)).convert('RGB')
        except Exception:
            # network error or an image PIL cannot decode: skip this URL
            return None
t = transforms.Compose([transforms.Resize(size),
transforms.CenterCrop(size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
res = (t(img) * 255).int()
return res.reshape((size*size*3))
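
    # Example: DataLoader().load('https://example.com/cat.jpg', 64) (URL is
    # illustrative) returns a flattened 64*64*3 integer tensor, or None if the
    # download or decoding fails.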
def save_imagenet_subset(self, root, name, class_wnids, image_size, max_images=None):
with open(os.path.join(root, name) + '.data', 'w+') as data:
with open(os.path.join(root, name) + '.label', 'w+') as label:
for i, wnid in enumerate(class_wnids):
urls = requests.get('http://www.image-net.org/api/text/imagenet.synset.geturls?wnid=' + wnid).content
urls = urls.split(b"\n")
images = 0
for u in range(len(urls)):
if max_images is not None and images+1 > max_images / len(class_wnids):
break
img = self.load(urls[u].decode('utf-8'), image_size)
if img is None:
continue
images += 1
data.write(' '.join([str(rgb) for rgb in img.numpy()]) + '\n')
label.write(str(i) + '\n')
                    if max_images is not None:
                        missing = math.floor(max_images / len(class_wnids)) - images
                        if missing > 0:
                            print('Wnid', wnid, 'needs', missing, 'more images.') |
#!/usr/bin/env python3
import rospy
import smach
from smach import State
# define state Foo
class Code(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['time_to_sleep','done'])
self.counter = 0
def execute(self, userdata):
rospy.loginfo('Executing state FOO')
if self.counter < 3:
self.counter += 1
rospy.loginfo("Time to sleep")
return 'time_to_sleep'
else:
return 'done'
# define state Bar
class Sleep(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['continue_coding'])
def execute(self, userdata):
rospy.loginfo('Done sleeping get back to coding')
return 'continue_coding'
def initialiseStateMachine():
# Initialise the ros node
rospy.init_node('smach_example_state_machine')
# Create a SMACH state machine with 2 outcomes?
sm = smach.StateMachine(outcomes=['project_delivered', 'burned_out'])
# Open the container
with sm:
# Add states to the container
smach.StateMachine.add('CODE', Code(),
transitions={'time_to_sleep':'SLEEP',
'done':'project_delivered'})
smach.StateMachine.add('SLEEP', Sleep(),
transitions={'continue_coding':'CODE'})
# Execute SMACH plan
outcome = sm.execute()
if __name__ == '__main__':
initialiseStateMachine()
|
# -*- coding: utf8 -*-
import os, sys
import requests
import logging
from io import BytesIO
from sanic import Sanic, response
from sanic_cors import CORS
from sanic.response import text, json
from Services.Aslide.aslide import Aslide
from Services.Aslide.deepzoom import ADeepZoomGenerator
app = Sanic(__name__)
# Enable cross-origin requests (CORS)
CORS(app)
tif_path_cache = {}
slide_cache = {}
QAS_HOST = '192.168.2.179:8010'
TIF_PATH_PREX = '/run/user/1000/gvfs/smb-share:server=192.168.2.221,share='
# Configure logging / initialize variables
class ConfigLog(object):
def __init__(self):
        # Where the log file is written
self.log_name = '/home/kyfq/MyPython/PycharmProjects/qas/QAS/QAS/logs/tiles_server.log'
        # Log line format
self.log_format = '%(levelname)s [%(asctime)s] %(message)s'
        # logging configuration
logging.basicConfig(
level=logging.WARNING,
format=self.log_format,
filename=self.log_name,
)
def get_path(image_id, request):
try:
if image_id in tif_path_cache:
tif_path = tif_path_cache[image_id]
else:
tiff_url = 'http://%s/api/v1/images/%s/' % (QAS_HOST, image_id)
response = requests.get(tiff_url)
if response.status_code != 200:
raise Exception('can not get resource', response.status_code, response.content)
image_info = response.json()
tif_path = os.path.join(image_info['storage_path'], image_info['file_name']+image_info['suffix'])
tif_path_cache[image_info['id']] = tif_path
return tif_path
except Exception as e:
        logging.error('Failed to get image path: %s' % e)
def get_slide(image_id, img_path):
"""
get tiles and cache
:param img_path:
:return:
"""
img_name = os.path.basename(img_path)
img = str(image_id) + '_' + img_name
try:
if img in slide_cache:
slide = slide_cache[img]
else:
slide = Aslide(img_path)
slide_cache[img] = slide
return slide
except Exception as e:
        logging.error('Failed to read image: %s' % e)
@app.route('/tiles/<image_id>/')
async def tiles_dzi(request, image_id):
"""
get tiff information
:param request:
:param image_id: id of tiff image
:return:
"""
slide = get_slide(image_id, get_path(image_id, request))
try:
zoomer = ADeepZoomGenerator(slide).get_dzi('jpeg')
return response.html(zoomer)
except Exception as e:
return response.html(str(e))
@app.route('/tiles/label_image/<image_id>_label.<format:[A-z]+>')
async def label_image(request, image_id, format):
"""
get tile image
:param request:
:param image_id: id of tiff image
:param format: view format
:return:
"""
slide = get_slide(image_id, get_path(image_id, request))
bio = BytesIO()
label_image = slide.label_image
    # Save the label image if it exists, otherwise return empty bytes
if label_image:
label_image.save(bio, 'png')
image_bytes = bio.getvalue()
else:
image_bytes = b''
headers = {}
headers.setdefault(
'Content-Disposition',
'attachment; image_id="{}"'.format(os.path.basename(image_id))
)
return response.HTTPResponse(status=200, headers=headers,
body_bytes=image_bytes, content_type='image/png')
@app.route('/tiles/<image_id>_files/<z:int>/<x:int>_<y:int>.<format:[A-z]+>')
async def tiles_png(request, image_id, z, x, y, format):
"""
get tile image
:param request:
:param image_id: id of tiff image
:param x: coordinate-x
:param y: coordinate-y
:param format: view format
:return:
"""
slide = get_slide(image_id, get_path(image_id, request))
x = int(x)
y = int(y)
z = int(z)
bio = BytesIO()
tiles_image = ADeepZoomGenerator(slide).get_tile(z, (x, y))
tiles_image.save(bio, 'png')
image_bytes = bio.getvalue()
headers = {}
headers.setdefault(
'Content-Disposition',
'attachment; image_id="{}"'.format(os.path.basename(image_id))
)
return response.HTTPResponse(status=200, headers=headers,
body_bytes=image_bytes, content_type='image/png')
@app.route("/tiles/screenshots/<image_id>/<x:int>_<y:int>_<w:int>_<h:int>.<format:[A-z]+>")
async def cell_image_request(request, image_id, x, y, w, h, format):
"""
get cell image
:param request:
:param image_id: id of tiff image
:param x: coordinate-x
:param y: coordinate-y
:param w: image width
:param h: image height
:return:
"""
print('==============> in')
slide = get_slide(image_id, get_path(image_id, request))
tile_image = slide.read_region((x, y), 0, (w, h))
bio = BytesIO()
tile_image.save(bio, 'png')
image_bytes = bio.getvalue()
headers = {}
headers.setdefault(
'Content-Disposition',
'attachment; image_id="{}"'.format(os.path.basename(image_id))
)
return response.HTTPResponse(status=200, headers=headers,
body_bytes=image_bytes, content_type='image/png')
if __name__ == '__main__':
    # Enable logging
ConfigLog()
    # access_log=False: do not log successful requests; error_log=True: log failed requests
app.run(host='192.168.2.179', port=5010, access_log=False, error_log=True)
# port = sys.argv[1]
# try:
# port = int(port)
# except:
# raise Exception("PORT %s IS NOT ACCEPTED!" % port)
#
# app.run(host="0.0.0.0", port=port, access_log=True, error_log=True)
|
"""
str - string
"""
print("Essa é uma 'string' (str) ")
print('Essa é uma "string" (str) ')
print("Esse é meu \"texto\" (str) ") # uso da \ como caracter de escape
print('Esse é meu \'texto\' (str) ') # uso da \ como caracter de escape
print("Esse é meu \n (str)") # erro ao usar a \ como caracter de escape
print(r"Esse é meu /n (str)") # uso do 'r' para que nada dentro das "" seja executado |
#!/usr/bin/python3
import time
from MessageConverters.MessageConverter import MessageConverter
import logging
from datetime import datetime
class KLAX(MessageConverter):
def __init__(self, devicename):
super().__init__(devicename)
def __toTime(self, byteArray):
year = 2000 + (byteArray[3] >> 1)
month = ((byteArray[3] & 0x01) << 3) | (byteArray[2] >> 5)
day = byteArray[2] & 0x1f
hours = byteArray[1] >> 3
minutes = ((byteArray[1] & 0x7) << 3) | (byteArray[0] >> 5)
seconds = byteArray[0] & 0x1f
# datetime(year, month, day, hour, minute, second, microsecond)
date_time_obj = datetime(year, month, day, hours, minutes, seconds)
return int(datetime.timestamp(date_time_obj))
def _hasDownlinkMessage(self):
return False
def _getDownlinkMessage(self):
return None
# example payload
# AGoIEQMAAAAAAAAEak2DATEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABdQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
# 00 6a 08 11 03 00 00 00 00 00 00 04 6a 4d 83 01 31 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 75 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
    def _convert(self, payload, port):
publ_array = []
curr_time = int(time.time())
try:
params = {}
value=(payload[0]<<8 | payload[1]) & 0x3FFF
entry = {}
entry["sensorid"] = "batV"
entry["ts"] = curr_time
entry["value"] = value/1000
publ_array.append(entry)
#SHT20,temperature,units:℃
value=payload[2]<<8 | payload[3]
            if payload[2] & 0x80:
                value -= 0x10000  # sign-extend: negative 16-bit temperature
entry = {}
entry["sensorid"] = "temp_SHT"
entry["ts"] = curr_time
entry["value"] = value/100
publ_array.append(entry)
#SHT20,Humidity,units:%
value=payload[4]<<8 | payload[5]
entry = {}
entry["sensorid"] = "hum_SHT"
entry["ts"] = curr_time
entry["value"] = value/10
publ_array.append(entry)
#DS18B20,temperature,units:℃
value=payload[7]<<8 | payload[8]
            if payload[7] & 0x80:
                value -= 0x10000  # sign-extend: negative 16-bit temperature
entry = {}
entry["sensorid"] = "temp_ds"
entry["ts"] = curr_time
entry["value"] = value/100
publ_array.append(entry)
except Exception as err:
self.logger.exception("Error while trying to decode payload..")
return publ_array
|
from itertools import chain
from django import forms
from django.core.exceptions import ValidationError
from django.db.models import Count, Prefetch
from django.utils.encoding import force_text
from django.utils.formats import number_format
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from pretix.base.forms.questions import (
BaseInvoiceAddressForm, BaseQuestionsForm,
)
from pretix.base.models import ItemVariation
from pretix.base.models.tax import TAXED_ZERO
from pretix.base.templatetags.money import money_filter
from pretix.base.templatetags.rich_text import rich_text
from pretix.base.validators import EmailBlacklistValidator
from pretix.helpers.templatetags.thumb import thumb
from pretix.presale.signals import contact_form_fields
class ContactForm(forms.Form):
required_css_class = 'required'
email = forms.EmailField(label=_('E-mail'),
help_text=_('Make sure to enter a valid email address. We will send you an order '
'confirmation including a link that you need to access your order later.'),
validators=[EmailBlacklistValidator()],
)
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event')
self.request = kwargs.pop('request')
self.all_optional = kwargs.pop('all_optional', False)
super().__init__(*args, **kwargs)
if self.event.settings.order_email_asked_twice:
self.fields['email_repeat'] = forms.EmailField(
label=_('E-mail address (repeated)'),
help_text=_('Please enter the same email address again to make sure you typed it correctly.'),
)
if not self.request.session.get('iframe_session', False):
# There is a browser quirk in Chrome that leads to incorrect initial scrolling in iframes if there
# is an autofocus field. Who would have thought… See e.g. here:
# https://floatboxjs.com/forum/topic.php?post=8440&usebb_sid=2e116486a9ec6b7070e045aea8cded5b#post8440
self.fields['email'].widget.attrs['autofocus'] = 'autofocus'
responses = contact_form_fields.send(self.event, request=self.request)
for r, response in responses:
for key, value in response.items():
# We need to be this explicit, since OrderedDict.update does not retain ordering
self.fields[key] = value
if self.all_optional:
for k, v in self.fields.items():
v.required = False
v.widget.is_required = False
def clean(self):
if self.event.settings.order_email_asked_twice and self.cleaned_data.get('email') and self.cleaned_data.get('email_repeat'):
if self.cleaned_data.get('email').lower() != self.cleaned_data.get('email_repeat').lower():
raise ValidationError(_('Please enter the same email address twice.'))
class InvoiceAddressForm(BaseInvoiceAddressForm):
required_css_class = 'required'
vat_warning = True
class InvoiceNameForm(InvoiceAddressForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for f in list(self.fields.keys()):
if f != 'name_parts':
del self.fields[f]
class QuestionsForm(BaseQuestionsForm):
"""
This form class is responsible for asking order-related questions. This includes
the attendee name for admission tickets, if the corresponding setting is enabled,
as well as additional questions defined by the organizer.
"""
required_css_class = 'required'
class AddOnRadioSelect(forms.RadioSelect):
option_template_name = 'pretixpresale/forms/addon_choice_option.html'
def optgroups(self, name, value, attrs=None):
attrs = attrs or {}
groups = []
has_selected = False
for index, (option_value, option_label, option_desc) in enumerate(chain(self.choices)):
if option_value is None:
option_value = ''
if isinstance(option_label, (list, tuple)):
raise TypeError('Choice groups are not supported here')
group_name = None
subgroup = []
groups.append((group_name, subgroup, index))
selected = (
force_text(option_value) in value and
(has_selected is False or self.allow_multiple_selected)
)
if selected is True and has_selected is False:
has_selected = True
attrs['description'] = option_desc
subgroup.append(self.create_option(
name, option_value, option_label, selected, index,
subindex=None, attrs=attrs,
))
return groups
class AddOnVariationField(forms.ChoiceField):
def valid_value(self, value):
text_value = force_text(value)
for k, v, d in self.choices:
if value == k or text_value == force_text(k):
return True
return False
class AddOnsForm(forms.Form):
"""
This form class is responsible for selecting add-ons to a product in the cart.
"""
def _label(self, event, item_or_variation, avail, override_price=None):
if isinstance(item_or_variation, ItemVariation):
variation = item_or_variation
item = item_or_variation.item
price = variation.price
label = variation.value
else:
item = item_or_variation
price = item.default_price
label = item.name
if override_price:
price = override_price
if self.price_included:
price = TAXED_ZERO
else:
price = item.tax(price)
if not price.gross:
n = '{name}'.format(
name=label
)
elif not price.rate:
n = _('{name} (+ {price})').format(
name=label, price=money_filter(price.gross, event.currency)
)
elif event.settings.display_net_prices:
n = _('{name} (+ {price} plus {taxes}% {taxname})').format(
name=label, price=money_filter(price.net, event.currency),
taxes=number_format(price.rate), taxname=price.name
)
else:
n = _('{name} (+ {price} incl. {taxes}% {taxname})').format(
name=label, price=money_filter(price.gross, event.currency),
taxes=number_format(price.rate), taxname=price.name
)
if avail[0] < 20:
n += ' – {}'.format(_('SOLD OUT'))
elif avail[0] < 100:
n += ' – {}'.format(_('Currently unavailable'))
else:
if avail[1] is not None and event.settings.show_quota_left:
n += ' – {}'.format(_('%(num)s currently available') % {'num': avail[1]})
if not isinstance(item_or_variation, ItemVariation) and item.picture:
n = escape(n)
n += '<br>'
n += '<a href="{}" class="productpicture" data-title="{}" data-lightbox={}>'.format(
item.picture.url, escape(escape(item.name)), item.id
)
n += '<img src="{}" alt="{}">'.format(
thumb(item.picture, '60x60^'),
escape(item.name)
)
n += '</a>'
n = mark_safe(n)
return n
def __init__(self, *args, **kwargs):
"""
Takes additional keyword arguments:
:param category: The category to choose from
:param event: The event this belongs to
:param subevent: The event the parent cart position belongs to
:param initial: The current set of add-ons
:param quota_cache: A shared dictionary for quota caching
:param item_cache: A shared dictionary for item/category caching
"""
category = kwargs.pop('category')
event = kwargs.pop('event')
subevent = kwargs.pop('subevent')
current_addons = kwargs.pop('initial')
quota_cache = kwargs.pop('quota_cache')
item_cache = kwargs.pop('item_cache')
self.price_included = kwargs.pop('price_included')
self.sales_channel = kwargs.pop('sales_channel')
super().__init__(*args, **kwargs)
if subevent:
item_price_override = subevent.item_price_overrides
var_price_override = subevent.var_price_overrides
else:
item_price_override = {}
var_price_override = {}
ckey = '{}-{}'.format(subevent.pk if subevent else 0, category.pk)
if ckey not in item_cache:
# Get all items to possibly show
items = category.items.filter_available(
channel=self.sales_channel,
allow_addons=True
).select_related('tax_rule').prefetch_related(
Prefetch('quotas',
to_attr='_subevent_quotas',
queryset=event.quotas.filter(subevent=subevent)),
Prefetch('variations', to_attr='available_variations',
queryset=ItemVariation.objects.filter(active=True, quotas__isnull=False).prefetch_related(
Prefetch('quotas',
to_attr='_subevent_quotas',
queryset=event.quotas.filter(subevent=subevent))
).distinct()),
).annotate(
quotac=Count('quotas'),
has_variations=Count('variations')
).filter(
quotac__gt=0
).order_by('category__position', 'category_id', 'position', 'name')
item_cache[ckey] = items
else:
items = item_cache[ckey]
for i in items:
if i.has_variations:
choices = [('', _('no selection'), '')]
for v in i.available_variations:
cached_availability = v.check_quotas(subevent=subevent, _cache=quota_cache)
if v._subevent_quotas:
choices.append(
(v.pk,
self._label(event, v, cached_availability,
override_price=var_price_override.get(v.pk)),
v.description)
)
n = i.name
if i.picture:
n = escape(n)
n += '<br>'
n += '<a href="{}" class="productpicture" data-title="{}" data-lightbox="{}">'.format(
i.picture.url, escape(escape(i.name)), i.id
)
n += '<img src="{}" alt="{}">'.format(
thumb(i.picture, '60x60^'),
escape(i.name)
)
n += '</a>'
n = mark_safe(n)
field = AddOnVariationField(
choices=choices,
label=n,
required=False,
widget=AddOnRadioSelect,
help_text=rich_text(str(i.description)),
initial=current_addons.get(i.pk),
)
if len(choices) > 1:
self.fields['item_%s' % i.pk] = field
else:
if not i._subevent_quotas:
continue
cached_availability = i.check_quotas(subevent=subevent, _cache=quota_cache)
field = forms.BooleanField(
label=self._label(event, i, cached_availability,
override_price=item_price_override.get(i.pk)),
required=False,
initial=i.pk in current_addons,
help_text=rich_text(str(i.description)),
)
self.fields['item_%s' % i.pk] = field
|
from .config import add_dqrf_config
from .config import add_dataset_path
from .dqrf_detr import DQRF_DETR |
# Image captcha validity period, in seconds
IMAGE_CODE_REDIS_EXPIRES = 300
# SMS verification code validity period
SMS_CODE_REDIS_EXPIRES = 300
# Interval between SMS verification code sends
SEND_SMS_CODE_INTERVAL = 60
# SMS verification code template ID
SMS_CODE_TEMP_ID = 1 |
from django.db.models import Max
from django.db.models.expressions import OuterRef, Subquery
from haystack import indexes
from djangocms_internalsearch.helpers import (
get_version_object,
get_versioning_extension,
)
class BaseSearchConfig(indexes.SearchIndex, indexes.Indexable):
"""
    Base config class providing the list of attributes that subclasses must provide
"""
text = indexes.CharField(document=True, use_template=False)
text_ngram = indexes.NgramField(document=False, use_template=False)
# admin setting
list_per_page = 50
@property
def model(self):
raise NotImplementedError("Config class must provide model attribute")
@property
def list_display(self):
raise NotImplementedError("Config class must provide list_display fields")
def get_model(self):
return self.model
def prepare_text(self, obj):
raise NotImplementedError(
"Config class must provide prepare_text method for index"
)
def prepare_text_ngram(self, obj):
return self.prepare_text(obj)
class BaseVersionableSearchConfig(BaseSearchConfig):
"""
    Base versionable config class providing the list of attributes that subclasses must provide
"""
version_author = indexes.CharField()
version_status = indexes.CharField()
locked = indexes.CharField()
is_latest_version = indexes.BooleanField()
def prepare_version_status(self, obj):
version_obj = get_version_object(obj)
if not version_obj:
return
return version_obj.state
def prepare_version_author(self, obj):
version_obj = get_version_object(obj)
if not version_obj:
return
return version_obj.created_by.username
def prepare_is_latest_version(self, obj):
latest_pk = getattr(obj, "latest_pk", None)
return obj.pk == latest_pk
def prepare_locked(self, obj):
version_obj = get_version_object(obj)
lock = getattr(version_obj, "versionlock", None)
if lock is None:
return None
return lock.created_by.username
def annotated_model_queryset(self, using=None):
"""Returns a model queryset annotated with latest_pk,
the primary key corresponding to the latest version
"""
versioning_extension = get_versioning_extension()
versionable = versioning_extension.versionables_by_content.get(self.model)
fields = {field: OuterRef(field) for field in versionable.grouping_fields}
inner = (
self.model._base_manager.filter(**fields)
.annotate(version=Max("versions__number"))
.order_by("-version")
.values("pk")
)
return self.model._base_manager.using(using).annotate(
latest_pk=Subquery(inner[:1])
)
def index_queryset(self, using=None):
versioning_extension = get_versioning_extension()
if versioning_extension and versioning_extension.is_content_model_versioned(
self.model
):
return self.annotated_model_queryset()
else:
return super().index_queryset(using)
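# A minimal sketch of a concrete config, kept as a comment because ``NewsContent``
# is a hypothetical content model used only for illustration; a real config points
# at an actual registered content model (and subclasses BaseVersionableSearchConfig
# instead when that model is versioned).
#
# class NewsContentSearchConfig(BaseSearchConfig):
#     model = NewsContent  # hypothetical model, e.g. imported from news.models
#     list_display = ["title", "language"]
#
#     def prepare_text(self, obj):
#         # Concatenate the fields that should be full-text searchable.
#         return " ".join(filter(None, [obj.title, obj.lead_in]))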
|
# -*- coding: utf-8 -*-
from flask import Blueprint
from flask_cors import CORS
# Define the Flask Blueprint
api = Blueprint('api', __name__)
CORS(api) # enable CORS on the API_v1.0 blueprint
from . import particle, search
|
from django.apps import AppConfig
class MainbookConfig(AppConfig):
name = 'mainBook'
|
import os
import shutil
from unittest import TestCase
from broker.service.spreadsheet_storage.spreadsheet_storage_service import SpreadsheetStorageService
TEST_STORAGE_DIR = "test_storage_dir"
class SpreadsheetStorageServiceTest(TestCase):
def setUp(self):
os.mkdir(TEST_STORAGE_DIR)
def test_store_submission_spreadsheet(self):
test_storage_dir = "test_storage_dir"
mock_submission_uuid = "mock-uuid"
mock_spreadsheet_name = "mock_spreadsheet.xls"
mock_spreadsheet_blob = bytes.fromhex('6d6f636b64617461')
spreadsheet_storage_service = SpreadsheetStorageService(test_storage_dir)
try:
spreadsheet_storage_service.store_submission_spreadsheet(mock_submission_uuid, mock_spreadsheet_name, mock_spreadsheet_blob)
assert True
except Exception as e:
assert False
def test_retrieve_spreadsheet(self):
test_storage_dir = "test_storage_dir"
mock_submission_uuid = "mock-uuid"
mock_spreadsheet_name = "mock_spreadsheet.xls"
mock_spreadsheet_blob = bytes.fromhex('6d6f636b64617461')
spreadsheet_storage_service = SpreadsheetStorageService(test_storage_dir)
try:
spreadsheet_storage_service.store_submission_spreadsheet(mock_submission_uuid, mock_spreadsheet_name, mock_spreadsheet_blob)
spreadsheet = spreadsheet_storage_service.retrieve_submission_spreadsheet(mock_submission_uuid)
assert spreadsheet["name"] == mock_spreadsheet_name
assert spreadsheet["blob"] == mock_spreadsheet_blob
except Exception as e:
assert False
def tearDown(self):
shutil.rmtree(TEST_STORAGE_DIR)
|
import pymysql
db = pymysql.connect(host="localhost", user="testuser", password="test123", database="sakila", port=3306)
cursor = db.cursor()
sql = "SELECT VERSION()"
cursor.execute(sql)
data = cursor.fetchone()
print("Database version : %s" % data)
db.close()
|
"""
Custom Django settings for django-user-tasks.
"""
from datetime import timedelta
from django.conf import settings as django_settings
from django.core.files.storage import get_storage_class
from user_tasks import filters
class LazySettings():
"""
The behavior of ``django-user-tasks`` can be customized via the following Django settings.
"""
@property
def USER_TASKS_ARTIFACT_FILTERS(self): # pylint: disable=invalid-name
"""
Tuple containing zero or more filters for UserTaskArtifact listing REST API calls.
Each entry should be a Django REST Framework filter backend class
object, such as ``django_filters.rest_framework.DjangoFilterBackend``.
The default value contains only ``user_tasks.filters.ArtifactFilterBackend``,
which allows superusers to see all artifacts but other users to see only
those for artifacts they triggered themselves.
"""
        return getattr(django_settings, 'USER_TASKS_ARTIFACT_FILTERS', (filters.ArtifactFilterBackend,))
@property
def USER_TASKS_ARTIFACT_STORAGE(self): # pylint: disable=invalid-name
"""
File storage backend to use for :py:attr:`user_tasks.models.UserTaskStatus.file`.
If explicitly set, the setting should be the import path of a storage
backend class.
"""
import_path = getattr(django_settings, 'USER_TASKS_ARTIFACT_STORAGE', None)
return get_storage_class(import_path)()
@property
def USER_TASKS_MAX_AGE(self): # pylint: disable=invalid-name
"""
``timedelta`` reflecting the age after which UserTaskStatus records should be deleted.
For this setting to be useful, ``user_tasks.tasks.purge_old_user_tasks``
should be configured to run on an appropriate schedule. Note that the
age is calculated from task creation, not completion. The default
value is 30 days.
"""
return getattr(django_settings, 'USER_TASKS_MAX_AGE', timedelta(days=30))
@property
def USER_TASKS_STATUS_FILTERS(self): # pylint: disable=invalid-name
"""
Tuple containing zero or more filters for UserTaskStatus listing REST API calls.
Each entry should be a Django REST Framework filter backend class
object, such as ``django_filters.rest_framework.DjangoFilterBackend``.
The default value contains only ``user_tasks.filters.StatusFilterBackend``,
which allows superusers to see all task statuses but other users to see only
those for tasks they triggered themselves.
"""
return getattr(django_settings, 'USER_TASKS_STATUS_FILTERS', (filters.StatusFilterBackend,))
settings = LazySettings() # pylint: disable=invalid-name
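# A sketch of how a project might override these defaults from its own Django
# settings module; the values below are illustrative only. Each name matches a
# property above and is read lazily via ``getattr`` on every access.
#
# # project/settings.py
# from datetime import timedelta
# from user_tasks.filters import StatusFilterBackend
#
# USER_TASKS_MAX_AGE = timedelta(days=7)
# USER_TASKS_ARTIFACT_STORAGE = 'django.core.files.storage.FileSystemStorage'
# USER_TASKS_STATUS_FILTERS = (StatusFilterBackend,)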
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines a set of constants shared by test runners and other scripts."""
import os
CHROME_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir))
EMULATOR_SDK_ROOT = os.path.abspath(os.path.join(CHROME_DIR, os.pardir,
os.pardir))
CHROME_PACKAGE = 'com.google.android.apps.chrome'
CHROME_ACTIVITY = 'com.google.android.apps.chrome.Main'
CHROME_DEVTOOLS_SOCKET = 'chrome_devtools_remote'
CHROME_TESTS_PACKAGE = 'com.google.android.apps.chrome.tests'
LEGACY_BROWSER_PACKAGE = 'com.google.android.browser'
LEGACY_BROWSER_ACTIVITY = 'com.android.browser.BrowserActivity'
CONTENT_SHELL_PACKAGE = 'org.chromium.content_shell_apk'
CONTENT_SHELL_ACTIVITY = 'org.chromium.content_shell_apk.ContentShellActivity'
CHROME_SHELL_PACKAGE = 'org.chromium.chrome.browser.test'
CHROMIUM_TEST_SHELL_PACKAGE = 'org.chromium.chrome.testshell'
CHROMIUM_TEST_SHELL_ACTIVITY = (
'org.chromium.chrome.testshell.ChromiumTestShellActivity')
CHROMIUM_TEST_SHELL_DEVTOOLS_SOCKET = 'chromium_testshell_devtools_remote'
CHROMIUM_TEST_SHELL_HOST_DRIVEN_DIR = os.path.join(
CHROME_DIR, 'chrome', 'android')
GTEST_TEST_PACKAGE_NAME = 'org.chromium.native_test'
GTEST_TEST_ACTIVITY_NAME = 'org.chromium.native_test.ChromeNativeTestActivity'
GTEST_COMMAND_LINE_FILE = 'chrome-native-tests-command-line'
BROWSERTEST_SUITE_NAME = 'content_browsertests'
BROWSERTEST_TEST_PACKAGE_NAME = 'org.chromium.content_browsertests_apk'
BROWSERTEST_TEST_ACTIVITY_NAME = (
'org.chromium.content_browsertests_apk.ContentBrowserTestsActivity')
BROWSERTEST_COMMAND_LINE_FILE = 'content-browser-tests-command-line'
# Ports arrangement for various test servers used in Chrome for Android.
# Lighttpd server will attempt to use 9000 as default port, if unavailable it
# will find a free port from 8001 - 8999.
LIGHTTPD_DEFAULT_PORT = 9000
LIGHTTPD_RANDOM_PORT_FIRST = 8001
LIGHTTPD_RANDOM_PORT_LAST = 8999
TEST_SYNC_SERVER_PORT = 9031
# The net test server is started from 10000. Reserve 20000 ports for the all
# test-server based tests should be enough for allocating different port for
# individual test-server based test.
TEST_SERVER_PORT_FIRST = 10000
TEST_SERVER_PORT_LAST = 30000
# A file to record next valid port of test server.
TEST_SERVER_PORT_FILE = '/tmp/test_server_port'
TEST_SERVER_PORT_LOCKFILE = '/tmp/test_server_port.lock'
TEST_EXECUTABLE_DIR = '/data/local/tmp'
# Directories for common java libraries for SDK build.
# These constants are defined in build/android/ant/common.xml
SDK_BUILD_JAVALIB_DIR = 'lib.java'
SDK_BUILD_TEST_JAVALIB_DIR = 'test.lib.java'
SDK_BUILD_APKS_DIR = 'apks'
# The directory on the device where perf test output gets saved to.
DEVICE_PERF_OUTPUT_DIR = '/data/data/' + CHROME_PACKAGE + '/files'
SCREENSHOTS_DIR = os.path.join(CHROME_DIR, 'out_screenshots')
ANDROID_SDK_VERSION = 17
ANDROID_SDK_ROOT = os.path.join(CHROME_DIR, 'third_party/android_tools/sdk')
ANDROID_NDK_ROOT = os.path.join(CHROME_DIR, 'third_party/android_tools/ndk')
UPSTREAM_FLAKINESS_SERVER = 'test-results.appspot.com'
|
#
# from moviepy.editor import VideoFileClip
# output2 = 'trucks2_cut.mp4'
# clip2 = VideoFileClip("DJI_0686.MOV").subclip(0,14)
#
# clip2.write_videofile(output2, audio=False)
# #clip2 = VideoFileClip("challenge_video.mp4").subclip(20,28)
from moviepy.editor import VideoFileClip
output2 = 'horse.mp4'
clip2 = VideoFileClip("horse_vid.mp4").subclip(120,130)
clip2.write_videofile(output2, audio=False)
#clip2 = VideoFileClip("challenge_video.mp4").subclip(20,28)
|
import datetime
from IPython.display import display, display_javascript, display_html
import json
import pydoc
from typing import Union
import uuid
from sfaira.data.dataloaders.base import DatasetGroup, DatasetSuperGroup
from sfaira.data.dataloaders.databases.cellxgene.cellxgene_loader import Dataset
from sfaira.data.dataloaders.databases.cellxgene.rest_helpers import get_collection, get_collections
class DatasetGroupCellxgene(DatasetGroup):
collection_id: str
def __init__(
self,
collection_id: str = "default",
data_path: Union[str, None] = None,
meta_path: Union[str, None] = None,
cache_path: Union[str, None] = None,
verbose: int = 0,
):
self._collection = None
dataset_ids = [x["id"] for x in get_collection(collection_id=collection_id)['datasets']]
loader_pydoc_path_sfaira = "sfaira.data.dataloaders.databases.cellxgene.cellxgene_loader"
load_func = pydoc.locate(loader_pydoc_path_sfaira + ".load")
datasets = [
Dataset(
collection_id=collection_id,
data_path=data_path,
meta_path=meta_path,
cache_path=cache_path,
load_func=load_func,
sample_fn=x,
sample_fns=dataset_ids,
verbose=verbose,
)
for x in dataset_ids
]
keys = [x.id for x in datasets]
super().__init__(datasets=dict(zip(keys, datasets)), collection_id=collection_id)
@property
def collection(self):
if self._collection is None:
self._collection = get_collection(collection_id=self.collection_id)
return self._collection
def show_summary(self):
uuid_session = str(uuid.uuid4())
display_html('<div id="{}" style="height: 600px; width:100%;"></div>'.format(uuid_session), raw=True)
display_javascript("""
require(["https://rawgit.com/caldwell/renderjson/master/renderjson.js"], function() {
document.getElementById('%s').appendChild(renderjson(%s))
});
""" % (uuid_session, json.dumps(self.collection)), raw=True)
class DatasetSuperGroupCellxgene(DatasetSuperGroup):
def __init__(
self,
data_path: Union[str, None] = None,
meta_path: Union[str, None] = None,
cache_path: Union[str, None] = None,
verbose: int = 0,
):
self._collections = None
# Get all collection IDs and instantiate one data set group per collection.
# Note that the collection itself is not passed to DatasetGroupCellxgene but only the ID string.
dataset_groups = [
DatasetGroupCellxgene(
collection_id=x["id"],
data_path=data_path,
meta_path=meta_path,
cache_path=cache_path,
verbose=verbose,
)
for x in self.collections
]
super().__init__(dataset_groups=dataset_groups)
@property
def collections(self):
"""
Yield all collections available from REST API.
"""
if self._collections is None:
self._collections = get_collections()
return self._collections
def show_summary(self):
"""
Prints overview of all collections available.
"""
display("There are " + str(len(self.collections)) + " public collections sorting by newest first:")
for collection in sorted(self.collections, key=lambda key: key['created_at'], reverse=True):
display("id: " + collection['id'] + ' created on: ' +
datetime.date.fromtimestamp(collection['created_at']).strftime("%m/%d/%y"))
|
import time
from selenium import webdriver
from selenium.webdriver.support.ui import Select
#scraper for wildflower.org
def scrape(name):
image_urls = set()
caps = webdriver.DesiredCapabilities().FIREFOX
caps["marionette"] = True
driver = webdriver.Firefox(capabilities=caps)
driver.get('https://www.wildflower.org/gallery/')
text_box = driver.find_element_by_id('search_field2')
go = driver.find_element_by_xpath("//input[@value='go']")
text_box.send_keys(name)
go.click()
time.sleep(1)
page_div = driver.find_element_by_id('fullpage_content')
images = page_div.find_elements_by_xpath('//img')
for image in images[1:]:
link = image.get_attribute('src').replace('320x240','640x480').replace('160x120','640x480')
image_urls.add(link)
driver.close()
return image_urls
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 29 16:53:19 2019
@author: gparkes
"""
import numpy as np
import matplotlib.pyplot as plt
# task 1
def monte_carlo_integrate(f, dx, dy, N):
area = (dx[1] - dx[0])*(dy[1] - dy[0])
# generate random numbers in 2-d
pairs = np.random.rand(N,2)
# move pairs into domain [x,y]
pairs[:,0] *= dx[1] - dx[0]
pairs[:,0] += dx[0]
pairs[:,1] *= dy[1] - dy[0]
pairs[:,1] += dy[0]
# x is in [:,0]
integrand = f(pairs[:,0])
# choose k where random numbers y fall below the integrand
k = pairs[:,1] < integrand
return (area * np.sum(k)) / N
# task 2
def task_2():
def f(x):
return np.sin(1/(x*(2-x)))**2
I = monte_carlo_integrate(f, [0, 2], [0, 1], 10**5)
print(I)
# task 3
def pi(x):
return np.sqrt(4-x**2)
Nvals = 100*2**np.arange(0,15)
errs = np.zeros((15,))
for i, N in enumerate(Nvals):
errs[i] = abs(monte_carlo_integrate(pi, [0, 2], [0, 2], N ) - np.pi)
# task 4
def task_4():
plt.loglog(Nvals, errs, 'kx')
plt.xlabel(r"$N$")
plt.ylabel(r"$E$")
plt.title("Convergence plot of Monte Carlo Integration")
plt.show()
# task 5
def task_5():
plt.loglog(Nvals, errs, 'kx')
m,b = np.polyfit(np.log(Nvals), np.log(errs), 1)
plt.loglog(Nvals, np.exp(b)*Nvals**m, 'b--')
plt.show()
|
from pathlib import Path
import httpx
class Poem:
def __init__(self):
self._token_url = "https://v2.jinrishici.com/token"
self._poem_url = "https://v2.jinrishici.com/sentence"
self._token = self._init_token()
def _init_token(self) -> str:
token_path = Path("./poem_token")
if not token_path.exists():
token = self._get_token()
with token_path.open("w") as f:
f.write(token)
return token
with token_path.open() as f:
return f.read().strip()
def _get_token(self) -> str:
res = httpx.get(self._token_url)
assert res.status_code == 200, f"poem 请求token失败: {res.status_code}"
j = res.json()
assert j["status"] == "success", f"poem 请求token失败: {j['status']}"
return j["data"]
def get_poem(self) -> str:
assert self._token, "poem 无可用token"
res = httpx.get(self._poem_url, headers={"X-User-Token": self._token})
assert res.status_code == 200, f"poem 获取诗句失败: {res.status_code}"
j = res.json()
assert j["status"] == "success", f"poem 获取诗句失败: {j['status']}"
return j["data"]["content"]
poem = Poem()
|
'''
Module for comparing data in two DataTables.
The main entry point is the diff method which takes the two tables and the list of headers which specifies the "primary key" for the tables
diff returns a ResultSet instance which contains the difference data by primary key.
ResultSet contains several methods for pruning expected differences and for slicing up the results to help discover patterns in the changes:
ignoreField prunes all differences for the given field
checkRemove prunes differences for the given field which match the given predicate
checkRemove_multiField prunes differences which affect multiple fields (e.g. we expect the change to move some of the value from one field to another so long as the sum is the same)
changedFields returns the list of fields which reported differences
filter takes a Result predicate and returns a new ResultSet containing the matching Results
pick returns a (pseudo-)random Result from the ResultSet
original{From|To}Rows returns a DataTable containing the rows which had differences
ResultSet also contains a few methods for formatted output:
repr(rs) returns a summary of the differences (the number of results in the collection)
str(rs) returns a concise display of the results showing specifically what changed
rs.printFormatted() prints a fixed-width-formatted tabular display of the results
ResultSet contains a list of Result instances which represents the changes to a single key entry (which may be multiple from/to rows if the "primary key" used doesn't guarantee uniqueness)
fromRow and toRow represent the entire original row (or rows)
repr(result) returns a summary of the differences (the number of from and to rows)
str(result) returns the differences - if there is one fromRow and one toRow returns the collection of fields which changed, otherwise reports the number of from and to rows
ignoreField, checkRemove and checkRemove_multiField are also available, but should be ignored on the Result object as they are called from the ResultSet when its corresponding methods are called
'''
from collections import defaultdict
from datatable import DataTable
from datatable_util import AttributeDict, sortKey
from functools import total_ordering
@total_ordering
class Result:
'''
Result class representing the difference between rows for a given bucket
Contains the key for this bucket (may be used to find the rows in the original files),
those fields which changed with the from and to values, and the actual from and to rows
'''
def __init__(self, key, keyFields, diffFields, fromRow, toRow):
self.key = key
self.diffFields = diffFields
self.fromRow = fromRow
self.toRow = toRow
self.__dict__.update(dict(zip(keyFields, key)))
self.__data = {}
if fromRow and toRow and len(fromRow) == 1 and len(toRow) == 1:
#extract the fields that are different between the two runs
for i, (f, t) in enumerate(zip(fromRow[0], toRow[0])):
if f != t:
self.__data[i] = f, t
def __eq__(self, other):
if isinstance(other, Result):
return self.key == other.key and self.__data == other.__data
if isinstance(other, tuple):
return self.key == other
if isinstance(other, dict):
return not any(self.__dict__[k] != other[k] for k in other.keys())
raise NotImplementedError
def __lt__(self, other):
if self == other:
return 0
if isinstance(other, Result):
def it():
yield self.key, other.key
for k in set(self.__data.keys()).union(other.__data.keys()):
yield self.__data.get(k), other.__data.get(k)
for s, o in it():
if s != o:
return tuple(sortKey(i) for i in s) < tuple(sortKey(i) for i in o)
return False
if isinstance(other, tuple):
return self.key < other
def comparable(self):
return bool(self.__data)
def __bool__(self):
return bool(self.__data or self.fromRow is None or self.toRow is None or len(self.fromRow) != len(self.toRow))
def __getitem__(self, field):
return self.__data[self.diffFields[field]]
def __contains__(self, field):
return self.diffFields[field] in self.__data
def __delitem__(self, field):
del self.__data[self.diffFields[field]]
def ignoreField(self, field):
if field in self:
del self.__data[self.diffFields[field]]
def checkRemove(self, field, filterMethod):
'''
remove the field from the result if filterMethod returns true for the fromRow, toRow pairs
field is the field to check
filterMethod is a method which takes two parameters (the fromRow and toRow versions of the field) and returns if they can be removed from the result
'''
fieldIdx = self.diffFields[field]
if fieldIdx in self.__data:
f, t = self.__data[fieldIdx]
if filterMethod(f, t):
del self.__data[fieldIdx]
def checkRemove_multiField(self, filterMethod, *fields):
'''
remove the set of fields from the result if filterMethod returns true for those entries
filterMethod is a method which takes two dicts: fromRow and toRow, with those fields specified by the fields parameter and returns if those values can be removed from the result
fields is a list of fields to check and possibly remove
'''
fieldIdxs = tuple((field, self.diffFields[field]) for field in fields)
if any(fieldIdx not in self.__data for field, fieldIdx in fieldIdxs):
return
fromRow, toRow = (AttributeDict((field, self.__data[fieldIdx][i]) for field, fieldIdx in fieldIdxs) for i in (0, 1))
if filterMethod(fromRow, toRow):
for field, fieldIdx in fieldIdxs:
del self.__data[fieldIdx]
def customCheck(self, keyFields, filterMethod, *fieldsToRemove):
'''
remove the set of fields from result if filterMethod returns true for the original fromRow and toRow pair
filterMethod is a method which takes two dicts: fromRow and toRow, with the data from the original from and to rows
fields is the list of fields to remove when filterMethod returns true
'''
if not (self.fromRow and self.toRow and len(self.fromRow) == 1 and len(self.toRow) == 1):
return
fromRow = self.originalFromRows(keyFields)[0]
toRow = self.originalToRows(keyFields)[0]
if filterMethod(fromRow, toRow):
for fieldIdx in tuple(self.diffFields[field] for field in fieldsToRemove):
if fieldIdx in self.__data:
del self.__data[fieldIdx]
def __repr__(self):
return 'Result(%s) # from rows: %d, to rows: %d' % (repr(self.key), len(self.fromRow) if self.fromRow else 0, len(self.toRow) if self.toRow else 0)
def __str__(self):
if self.__data:
return '%s\t\t%s' % (self.key, {field: self.__data[fieldIdx] for field, fieldIdx in self.diffFields.items() if fieldIdx in self.__data})
return '%s\tFrom: %s\tTo: %s' % (self.key, len(self.fromRow) if self.fromRow else 0, len(self.toRow) if self.toRow else 0)
def dataKeys(self):
return tuple(field for field, fieldIdx in self.diffFields.items() if fieldIdx in self.__data)
def getLengths(self):
return [len('%s' % k) for k in self.key]
def formatKeys(self, lengths):
return ', '.join(('% ' + str(l) + 's') % k for l, k in zip(lengths, self.key)) + ' |'
def originalFromRows(self, keyFields):
return [
AttributeDict(
{field: fromRow[fieldIdx]
for field, fieldIdx in self.diffFields.items()
}
) + dict(zip(keyFields, self.key)) for fromRow in (self.fromRow or [])
]
def originalToRows(self, keyFields):
return [
AttributeDict(
{field: toRow[fieldIdx]
for field, fieldIdx in self.diffFields.items()
}
) + dict(zip(keyFields, self.key)) for toRow in (self.toRow or [])
]
class ResultSet:
'''
ResultSet class representing the complete set of diff results.
Each bucket in either table is represented by a Result instance.
Provides filtering, iterating over the results and pretty-printing.
'''
def __init__(self, keyFields):
self.__data = defaultdict(lambda : [])
self.keyFields = keyFields
def __iadd__(self, result):
if isinstance(result, Result) and result:
self.__data[result.key].append(result)
return self
def filter(self, criteria):
newResults = ResultSet(self.keyFields)
for result in self:
if criteria(result):
newResults += result
return newResults
def __len__(self):
return len(self.__data)
def __iter__(self):
for rList in self.__data.values():
yield from rList
def __getitem__(self, key):
if key in self.__data:
return self.__data[key]
raise KeyError(key)
def __delitem__(self, key):
if isinstance(key, Result):
self.__data[key.key].remove(key)
if not self.__data[key.key]:
del self.__data[key.key]
else:
del self.__data[key]
def __repr__(self):
return 'ResultSet() # length: %d' % len(self.__data)
def __str__(self):
def tempIter():
yield 'Results:'
for result in self:
yield str(result)
return '\n'.join(tempIter())
def printFormatted(self):
for line in _formatResults(self):
print(line)
def maxKeyLengths(self):
candidates = [self.keyFields] + [result.key for result in self]
return [max(len('%s' % row[i]) for row in candidates) for i in range(len(self.keyFields))]
def formatKeyFields(self, lengths):
return ', '.join(('% ' + str(l) + 's') % k for l, k in zip(lengths, self.keyFields)) + ' |'
def pick(self):
'''Returns a (somewhat) random result object'''
return next(iter(self.__data.values()))[0]
def ignoreField(self, field):
for result in list(self):
result.ignoreField(field)
if not result:
del self[result]
def changedFields(self):
'''return the list of fields which changed'''
return sorted({h for result in self for h in result.dataKeys()})
def checkRemove(self, field, filterMethod):
'''
remove the field from each result if filterMethod returns true for the fromRow, toRow pairs. Removes any result which has no more inline differences
field is the field to check
filterMethod is a method which takes two parameters (the fromRow and toRow versions of the field) and returns if they can be removed from the result
'''
for result in list(self):
result.checkRemove(field, filterMethod)
if not result:
del self[result]
def checkRemove_multiField(self, filterMethod, *fields):
'''
remove the set of fields from each result if filterMethod returns true for those entries. Removes any result which has no more inline differences
filterMethod is a method which takes two dicts: fromRow and toRow, with those fields specified by the fields parameter and returns if those values can be removed from the result
fields is a list of fields to check and possibly remove
'''
for result in list(self):
result.checkRemove_multiField(filterMethod, *fields)
if not result:
del self[result]
def customCheck(self, filterMethod, *fieldsToRemove):
'''
remove the set of fields from each result if filterMethod returns true for the original fromRow and toRow pair. Removes any result with no more inline differences
filterMethod is a method which takes two dicts: fromRow and toRow, with the data from the original from and to rows
fields is the list of fields to remove when filterMethod returns true
'''
for result in list(self):
result.customCheck(self.keyFields, filterMethod, *fieldsToRemove)
if not result:
del self[result]
def originalFromRows(self):
'''return the original rows being diffed from'''
return DataTable(fromRow for result in self for fromRow in result.originalFromRows(self.keyFields))
def originalToRows(self):
'''return the original rows being diffed to'''
return DataTable(toRow for result in self for toRow in result.originalToRows(self.keyFields))
def _bucket(table, bucketHeaders, diffHeaders):
buckets = defaultdict(lambda : [])
for row in table:
key = tuple(row[h] for h in bucketHeaders)
value = tuple((row[h] if h in row else None) for h in diffHeaders)
buckets[key].append(value)
return buckets
def sortRowKey(row):
return tuple(sortKey(v) for v in row)
def diff(fromTable, toTable, *buckets):
'''The base diff method - buckets the data and ships it off to the Result and ResultSet classes to check for in-line differences'''
#split the data into buckets
fromBucketHeaders, toBucketHeaders = ([b for b in buckets if b in table.headers()] for table in (fromTable, toTable))
commonOtherHeaders = list(set(fromTable.headers()).intersection(toTable.headers()).difference(buckets))
fromOtherHeaders, toOtherHeaders = ([h for h in table.headers() if h not in bucketHeaders and h not in commonOtherHeaders] for table, bucketHeaders in ((fromTable, fromBucketHeaders), (toTable, toBucketHeaders)))
diffHeaders = {h: i for i, h in enumerate(commonOtherHeaders + fromOtherHeaders + toOtherHeaders)}
diffHeadersList = [None] * len(diffHeaders)
for h, i in diffHeaders.items():
diffHeadersList[i] = h
fromBuckets, toBuckets = (_bucket(table, bucketHeaders, diffHeadersList) for table, bucketHeaders in ((fromTable, fromBucketHeaders), (toTable, toBucketHeaders)))
allKeys = set(fromBuckets.keys()).union(toBuckets.keys())
results = ResultSet(buckets)
for key in allKeys:
if key in fromBuckets:
fromBucket = sorted(fromBuckets[key], key=sortRowKey)
else:
fromBucket = None
if key in toBuckets:
toBucket = sorted(toBuckets[key], key=sortRowKey)
else:
toBucket = None
if fromBucket and toBucket and len(fromBucket) == len(toBucket):
for fromRow, toRow in zip(fromBucket, toBucket):
results += Result(key, buckets, diffHeaders, [fromRow], [toRow])
else:
results += Result(key, buckets, diffHeaders, fromBucket, toBucket)
return results
def _formatResults(results):
'''Produce a pretty string for printing to the screen
format:
header line: "bucket", Field, , Field, ...
data lines: bucket, field_from, field_to, field_from, field_to...
'''
if not results:
yield 'No results to compare'
return
mismatch = sorted(result for result in results if result.fromRow is None or result.toRow is None or len(result.fromRow) != len(result.toRow))
keyMaxLengths = results.maxKeyLengths()
keyTotalSize = len(results.formatKeyFields(keyMaxLengths))
if mismatch:
yield "Buckets don't match number of rows:"
yield results.formatKeyFields(keyMaxLengths) + ' From Rows To Rows'
for result in mismatch:
yield result.formatKeys(keyMaxLengths) + ' %-12d %-12d' % (len(result.fromRow) if result.fromRow else 0, len(result.toRow) if result.toRow else 0)
results = results.filter(lambda result: result.fromRow and result.toRow and len(result.fromRow) == len(result.toRow))
if not results:
yield 'No inline differences'
return
yield 'Changes in common buckets:'
headers = results.changedFields()
resultList = []
maxLens = [keyTotalSize] + [0]*(len(headers)*2)
for i in range(len(headers)):
maxLens[i*2+1] = len(str(headers[i]))
for result in results:
buckets = (result.formatKeys(keyMaxLengths),)
for i, h in enumerate(headers):
if h in result:
maxLens[i*2+1] = max(maxLens[i*2+1], len(str(result[h][0])))
maxLens[i*2+2] = max(maxLens[i*2+2], len(str(result[h][1])))
buckets += result[h]
else:
buckets += '', ''
resultList.append(buckets)
maxLens = [str(m+1) for m in maxLens]
linePattern = '%-' + 's%-'.join(maxLens) + 's'
yield linePattern % ((results.formatKeyFields(keyMaxLengths),) + sum(((h,'') for h in headers), ()))
for result in resultList:
yield linePattern % result
def formatResults(results):
return '\n'.join(_formatResults(results))
def expectedChange(beforeValue, afterValue):
return lambda f, t: (f, t) == (beforeValue, afterValue)
def fromNothingToNothing(f, t):
return not (f or t)
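# A minimal usage sketch, run only when this module is executed directly. It assumes
# DataTable accepts an iterable of dict-like rows (the same shape originalFromRows /
# originalToRows build above); field names and values are made up for illustration.
if __name__ == '__main__':
    before = DataTable([
        {'id': 1, 'status': 'open', 'amount': 100},
        {'id': 2, 'status': 'open', 'amount': 250},
    ])
    after = DataTable([
        {'id': 1, 'status': 'closed', 'amount': 100},
        {'id': 2, 'status': 'open', 'amount': 275},
    ])
    results = diff(before, after, 'id')  # bucket ("primary key") on the id field
    results.ignoreField('status')  # drop an expected status change
    results.checkRemove('amount', expectedChange(250, 275))  # drop an expected amount change
    print(formatResults(results))  # anything left is an unexplained difference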
|
import sys
import os
def debug(msg):
if (len(sys.argv) > 2):
if (sys.argv[2] == 'debug'):
print(msg)
if (len(sys.argv) < 2):
print('Usage: {} file.dyn [debug]'.format(sys.argv[0]))
sys.exit(1)
filename = sys.argv[1]
dest = os.path.splitext(filename)[0] + '.zip'
# XOR key and shuffle index arrays for decryption
CHUNK_SIZE = 8
key = bytearray([(0x8D + i) for i in range(CHUNK_SIZE)])
indices = [5, 3, 6, 7, 4, 2, 0, 1]
debug('chunk size = {}, key = {}, indices = {}'.format(CHUNK_SIZE, key, indices))
debug('filename = {}, dest = {}'.format(filename, dest))
with open(filename, 'rb') as file, open(dest, 'wb') as output:
while True:
# Read in a chunk of raw data
chunk = file.read(CHUNK_SIZE)
if not chunk:
break
# Need a full 8-byte chunk to decrypt.
if(len(chunk) == CHUNK_SIZE):
out = bytearray(CHUNK_SIZE)
# Decrypt the chunk.
for i in range(CHUNK_SIZE):
out[i] = chunk[indices[i]] ^ key[i]
else:
out = chunk
debug('out = {}'.format(out))
output.write(out)
print('Decrypted {} to {}'.format(filename, dest))
|
class Solution:
def isPalindrome(self, s: str) -> bool:
special_char = "~!@#$%^&*()_+`-=[]\\;',./{} |:<>?\"'"
s = s.lower()
for i in special_char:
s = s.replace(i, "")
return s == s[::-1] |
'''
reference
https://blog.csdn.net/zhang_gang1989/article/details/72884662
https://blog.csdn.net/weixin_38215395/article/details/78679296
'''
# change the lineArr index used below according to the data trace format
import os
import argparse
def mkdir(path):
folder = os.path.exists(path)
if not folder:
os.makedirs(path)
def ReadDelayData(fileName):
    sum_owd = 0.0
    sum_lines = 0
    with open(fileName) as txtData:
        for line in txtData.readlines():
            lineArr = line.strip().split()
            sum_owd += float(lineArr[2])
            sum_lines += 1
    return sum_owd, sum_lines
parser = argparse.ArgumentParser(description='manual to this script')
parser.add_argument('--algo', type=str, default ='olia')
args = parser.parse_args()
algo=args.algo
instance=[1,2,3,4,5,6,7,8]
flows = 4
data_dir="data_process"
out_path=data_dir+"/"
fileout="%s_owd.txt"%(algo)
name="%s_%s_%s_owd.txt"
mkdir(out_path)
fout=open(out_path+fileout,'w')
for case in range(len(instance)):
total_owd=0.0
total_lines=0
average_owd=0.0
exist=False
for i in range(flows):
sum_delay=0.0
sum_lines=0
average_owd=0.0
filename=name%(str(instance[case]),algo,str(i+1))
if os.path.exists(filename):
sum_delay,sum_lines=ReadDelayData(filename)
total_owd+=sum_delay
total_lines+=sum_lines
exist=True
if exist:
average_owd=total_owd/total_lines
fout.write(str(instance[case])+"\t")
fout.write(str(average_owd)+"\n")
else:
fout.write(str(instance[case])+"\n")
fout.close()
|
#!/usr/bin/env python
# coding=utf-8
import xlrd
from lxml import etree
import json
import io
import sys
def read_xls(fromfile):
# with open(fromfile) as f:
# content = f.read()
#
# print (content)
# return
book = xlrd.open_workbook(fromfile)
sheet = book.sheet_by_name('student')
dataList = []
rows = sheet.nrows
cols = sheet.ncols
for row in range(rows):
dict = {}
dict['stu_no'] = sheet.cell_value(row, 0)
dict['stu_name'] = sheet.cell_value(row, 1)
dict['chinese'] = int(sheet.cell_value(row, 2))
dict['math'] = int(sheet.cell_value(row, 3))
dict['english'] = int(sheet.cell_value(row, 4))
dataList.append(dict)
return (dataList, json.dumps(dataList, ensure_ascii=False))
def to_json_file(json):
with open('../cache/student_new.json', 'wt') as f:
f.write(json)
def to_xml(dataList):
root = etree.Element('root')
# Create a comment
    root.append(etree.Comment(u' Student information table\n\t[student no., name, Chinese, math, English]'))
stuList = etree.SubElement(root, 'student_list')
# Create student item
for item in dataList:
stu = etree.SubElement(stuList, 'stu')
stu.set('stu_no', item['stu_no'])
stu.text = item['stu_name']
stu.set('chinese', str(item['chinese']))
stu.set('math', str(item['math']))
stu.set('english', str(item['english']))
# Save to file
tree = etree.ElementTree(root)
tree.write('../cache/student_list.xml', encoding='utf-8', pretty_print=True, \
xml_declaration=True)
if __name__ == '__main__':
reload(sys)
sys.setdefaultencoding('utf-8')
(dataList, json) = read_xls('../cache/student.xls')
to_json_file(json)
to_xml(dataList)
|
from celery.schedules import crontab
# uses AWS creds from the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env variables
BROKER_URL = 'sqs://'
BROKER_TRANSPORT_OPTIONS = {
'region': 'eu-west-1',
'polling_interval': 15 * 1,
'queue_name_prefix': 'mma-dexter-',
'visibility_timeout': 3600,
}
# all our tasks can by retried if the worker fails
CELERY_ACKS_LATE = True
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TIMEZONE = 'Africa/Johannesburg'
CELERY_ENABLE_UTC = True
CELERYBEAT_SCHEDULE = {
'fetch-yesterdays-feeds': {
'schedule': crontab(hour=3, minute=0),
'task': 'dexter.tasks.fetch_yesterdays_feeds',
},
'back-process-feeds': {
'schedule': crontab(hour=6, minute=0),
'task': 'dexter.tasks.back_process_feeds',
},
'backfill-taxonomies': {
'schedule': crontab(hour=21, minute=0),
'task': 'dexter.tasks.backfill_taxonomies',
},
}
|
import os
import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
from src.crowd_count import CrowdCounter
from src.data_loader import ImageDataLoader
from src import utils
import h5py
import scipy.io as io
import PIL.Image as Image
import glob
from matplotlib import pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import scipy
from matplotlib import cm as CM
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import cv2
from tqdm import tqdm
import math
from torchvision import datasets, transforms
from utils_adv_patch import *
import argparse
class FC(nn.Module):
def __init__(self, in_features, out_features, relu=True):
super(FC, self).__init__()
self.fc = nn.Linear(in_features, out_features)
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, x):
x = self.fc(x)
if self.relu is not None:
x = self.relu(x)
return x
def save_net(fname, net):
import h5py
h5f = h5py.File(fname, mode='w')
for k, v in net.state_dict().items():
h5f.create_dataset(k, data=v.cpu().numpy())
def load_net(fname, net):
import h5py
h5f = h5py.File(fname, mode='r')
for k, v in net.state_dict().items():
param = torch.from_numpy(np.asarray(h5f[k]))
v.copy_(param)
def np_to_variable(x, is_cuda=True, is_training=False, dtype=torch.FloatTensor):
if is_training:
v = Variable(torch.from_numpy(x).type(dtype))
else:
v = Variable(torch.from_numpy(x).type(dtype), requires_grad=False, volatile=True)
if is_cuda:
v = v.cuda()
return v
def set_trainable(model, requires_grad):
for param in model.parameters():
param.requires_grad = requires_grad
def weights_normal_init(model, dev=0.01):
if isinstance(model, list):
for m in model:
weights_normal_init(m, dev)
else:
for m in model.modules():
if isinstance(m, nn.Conv2d):
# print torch.sum(m.weight)
m.weight.data.normal_(0.0, dev)
if m.bias is not None:
m.bias.data.fill_(0.0)
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0.0, dev)
# ******************************************* kappa *********************************************
def generate_kappa_schedule():
kappa_schedule = []
for j in range(50):
kappa_schedule.append(1)
kappa_value = 1.0
step = 0.5 / 350
for i in range(350):
kappa_value -= step
kappa_schedule.append(kappa_value)
for k in range(500):
kappa_schedule.append(0.5)
return kappa_schedule
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
print("using cuda: ", format(device))
#FIXME correct done
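# attack() runs an iterative, gradient-based update of the patch region only: the
# patch is pasted onto the clean image through the mask, the counting loss against
# the supplied adversarial target plus an L1 term that keeps the patch close to its
# initialisation is backpropagated, and the patch is nudged by a clipped fraction of
# the image gradient for args.attack_epoch iterations before being re-applied.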
def attack(net, tgt_img_var, patch_var, mask_var, patch_init_var, gt_data_var, target_var, criterion):
# net.eval()
adv_tgt_img_var = torch.mul((1 - mask_var), tgt_img_var) + torch.mul(mask_var, patch_var)
loss_scalar = 0
# print("now next patch: \n")
for i in range(args.attack_epoch):
adv_tgt_img_var = Variable(adv_tgt_img_var.data, requires_grad=True)
just_the_patch_var = Variable(patch_var.data, requires_grad=True)
        adv_out_var = net(adv_tgt_img_var, gt_data_var)  # gt_data and im_data must both be fed in; both inputs must be numpy arrays
loss_data = criterion(adv_out_var, target_var)
loss_reg = F.l1_loss(torch.mul(mask_var, just_the_patch_var), torch.mul(mask_var, patch_init_var))
loss = (1 - args.alpha) * loss_data + args.alpha * loss_reg
loss.backward()
adv_tgt_img_grad = adv_tgt_img_var.grad.clone()
adv_tgt_img_var.grad.data.zero_()
patch_var -= torch.clamp(0.5 * args.lr * adv_tgt_img_grad, -2, 2)
adv_tgt_img_var = torch.mul((1 - mask_var), tgt_img_var) + torch.mul(mask_var, patch_var)
adv_tgt_img_var = torch.clamp(adv_tgt_img_var, -1, 1)
loss_scalar = loss.item()
# print("attack_loss_epoch: ", loss_scalar)
return adv_tgt_img_var, patch_var
#FIXME correct done
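# train() performs adversarial training: for every batch the patch is randomly
# placed on the image via the circle/square transform, optimised by attack(), and
# the network is then updated on a kappa-weighted mix of the loss on the patched
# image and the loss on the clean image (kappa decays from 1.0 towards 0.5 over the
# epochs, see generate_kappa_schedule above). The optimised patch is cropped back
# out of the full-size canvas, rescaled to its original shape and returned.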
def train(net, patch, patch_shape, mask, patch_init, data_loader_train, criterion, optimizer, kappa, epoch, output_dir,
method, dataset_name, Loss_list):
patch_shape_orig = patch_shape
net.train()
epoch_loss = 0.0
for blob in data_loader_train:
        im_data = blob['data']  # (1,1,645,876), numpy array
        gt_data = blob['gt_density']  # (1,1,327,546), numpy array
data_shape = im_data.shape # (1,1,786,1024)
im_data_gt = torch.from_numpy(im_data)
tgt_img_var = Variable(im_data_gt.to(device))
gt_data_var = torch.from_numpy(gt_data)
gt_data_var = Variable(gt_data_var.to(device))
if args.patch_type == 'circle':
patch, mask, patch_init, rx, ry, patch_shape = circle_transform(patch, mask, patch_init, data_shape,
patch_shape)
elif args.patch_type == 'square':
patch, mask, patch_init, rx, ry = square_transform(patch, mask, patch_init, data_shape, patch_shape)
patch, mask = torch.FloatTensor(patch), torch.FloatTensor(mask)
patch_init = torch.FloatTensor(patch_init)
patch, mask = patch.to(device), mask.to(device)
patch_init = patch_init.to(device)
patch_var, mask_var = Variable(patch), Variable(mask)
patch_init_var = Variable(patch_init).to(device)
# FIXME the parameter is -1 ?
target_var = Variable(-1 * gt_data_var.data.clone(), requires_grad=True).to(device)
adv_tgt_img_var, patch_var = attack(net, tgt_img_var, patch_var, mask_var, patch_init_var, gt_data_var,
target_var, criterion)
adv_out_var = net(adv_tgt_img_var, gt_data_var)
normal_out_var = net(tgt_img_var, gt_data_var)
# FIXME final balance 0.5 and 0.5
loss_data = (1 - kappa[epoch]) * criterion(adv_out_var, gt_data_var) \
+ kappa[epoch] * criterion(normal_out_var, gt_data_var)
epoch_loss += loss_data.item()
optimizer.zero_grad()
loss_data.backward()
optimizer.step()
masked_patch_var = torch.mul(mask_var, patch_var)
patch = masked_patch_var.data.cpu().numpy()
mask = mask_var.data.cpu().numpy()
patch_init = patch_init_var.data.cpu().numpy()
new_patch = np.zeros(patch_shape)
new_mask = np.zeros(patch_shape)
new_patch_init = np.zeros(patch_shape)
for x in range(new_patch.shape[0]):
for y in range(new_patch.shape[1]):
new_patch[x][y] = patch[x][y][ry:ry + patch_shape[-2], rx:rx + patch_shape[-1]]
new_mask[x][y] = mask[x][y][ry:ry + patch_shape[-2], rx:rx + patch_shape[-1]]
new_patch_init[x][y] = patch_init[x][y][ry:ry + patch_shape[-2], rx:rx + patch_shape[-1]]
patch = new_patch
mask = new_mask
patch_init = new_patch_init
patch = zoom(patch, zoom=(1, 1, patch_shape_orig[2] / patch_shape[2], patch_shape_orig[3] / patch_shape[3]),
order=1)
mask = zoom(mask, zoom=(1, 1, patch_shape_orig[2] / patch_shape[2], patch_shape_orig[3] / patch_shape[3]),
order=0)
patch_init = zoom(patch_init,
zoom=(1, 1, patch_shape_orig[2] / patch_shape[2], patch_shape_orig[3] / patch_shape[3]),
order=1)
Loss_list.append(epoch_loss / data_loader_train.get_num_samples())
# save model parameter
save_name = os.path.join(output_dir, '{}_{}_{}_{}.h5'.format(method, dataset_name, epoch,
epoch_loss / data_loader_train.get_num_samples()))
save_net(save_name, net)
# print progress for monitoring
print("epoch: ", epoch)
print(Loss_list)
train_loss_txt = open('./adv_train_0.08/train_loss.txt', 'a')
train_loss_txt.write(str(Loss_list[epoch]))
train_loss_txt.write('\n')
train_loss_txt.close()
return adv_tgt_img_var, patch, adv_out_var, mask, patch_shape
def test(patch, mask, patch_shape, data_loader_val, net):
mae_gt = 0.0
mse_gt = 0.0
mse_adv = 0.0
mae_adv = 0.0
net.eval()
for blob in data_loader_val:
im_data = blob['data'] # (1,1,645,876), numpy array
gt_data = blob['gt_density'] # (1,1,327,546), numpy array
data_shape = im_data.shape # (1,1,786,1024)
im_data_gt = torch.from_numpy(im_data)
tgt_img_var = Variable(im_data_gt.to(device))
gt_data_var = torch.from_numpy(gt_data)
gt_data_var = Variable(gt_data_var.to(device))
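# prediction on the clean image, used as the unattacked reference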
density_map = net(tgt_img_var, gt_data_var)
if args.patch_type == 'circle':
patch_full, mask_full, _, _, _, _ = circle_transform_test(patch, mask, patch.copy(),
data_shape,
patch_shape, True)
elif args.patch_type == 'square':
# patch_init is not passed into test(); reuse a copy of the patch, mirroring the circle branch above
patch_full, mask_full, _, rx, ry = square_transform(patch, mask, patch.copy(), data_shape, patch_shape)
patch_full, mask_full = torch.FloatTensor(patch_full), torch.FloatTensor(mask_full)
patch_full, mask_full = patch_full.to(device), mask_full.to(device)
patch_var, mask_var = Variable(patch_full), Variable(mask_full)
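# paste the patch onto the clean image and clamp to the valid input range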
adv_tgt_img_var = torch.mul((1 - mask_var), tgt_img_var) + torch.mul(mask_var, patch_var)
adv_tgt_img_var = torch.clamp(adv_tgt_img_var, -1, 1)
adv_out_var = net(adv_tgt_img_var, gt_data_var) # both gt_data and im_data must be fed in, and the inputs must be numpy arrays
density_map = density_map.data.detach().cpu().numpy()
adv_out = adv_out_var.data.detach().cpu().numpy()
gt_count = np.sum(gt_data)
et_count = np.sum(density_map)
adv_count = np.sum(adv_out)
mae_gt += abs(gt_count - et_count)
mse_gt += ((gt_count - et_count)*(gt_count - et_count))
mae_adv += abs(gt_count - adv_count)
mse_adv += ((gt_count - adv_count)*(gt_count - adv_count))
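# convert the accumulated errors into per-sample MAE and RMSE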
mae_gt = mae_gt / data_loader_val.get_num_samples()
mse_gt = np.sqrt(mse_gt / data_loader_val.get_num_samples())
mae_adv = mae_adv / data_loader_val.get_num_samples()
mse_adv = np.sqrt(mse_adv / data_loader_val.get_num_samples())
print('\nMAE_gt: %0.2f, MSE_gt: %0.2f' % (mae_gt, mse_gt))
print('\nMAE_adv: %0.2f, MSE_adv: %0.2f' % (mae_adv, mse_adv))
f = open('./adv_train_0.08/adv_results.txt', 'a')
f.write('adv_mae: %s \n' % str(mae_adv))
f.write('adv_mse: %s \n' % str(mse_adv))
f.write('\n')
f.close()
f = open('./adv_train_0.08/normal_results.txt', 'a')
f.write('normal_mae: %s \n' % str(mae_gt))
f.write('normal_mse: %s \n' % str(mse_gt))
f.write('\n')
f.close()
def main():
train_path = './data/formatted_trainval/shanghaitech_part_A_patches_9/train'
train_gt_path = './data/formatted_trainval/shanghaitech_part_A_patches_9/train_den'
val_path = './data/formatted_trainval/shanghaitech_part_A_patches_9/val'
val_gt_path = './data/formatted_trainval/shanghaitech_part_A_patches_9/val_den'
# data_path = './data/original/shanghaitech/part_A_final/test_data/images/'
# gt_path = './data/original/shanghaitech/part_A_final/test_data/ground_truth_csv/'
# model_path = './pretrain_models/mcnn_shtechA_660.h5'
if not os.path.exists('./adv_train_0.08'):
os.makedirs('./adv_train_0.08')
output_dir = './adv_train_0.08'
method = 'MCNN'
dataset_name = 'A'
data_loader_train = ImageDataLoader(train_path, train_gt_path, shuffle=True, gt_downsample=True, pre_load=True)
data_loader_val = ImageDataLoader(val_path, val_gt_path, shuffle=False, gt_downsample=True, pre_load=True)
'''
# eval
net = CrowdCounter()
trained_model = os.path.join(model_path)
load_net(trained_model, net)
net.to(device)
net.eval()
'''
net = CrowdCounter()
weights_normal_init(net, dev=0.01)
net.to(device)
params = list(net.parameters())
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=args.lr_train_models)
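# kappa[epoch] weights the clean loss against the adversarial loss in train()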
kappa = generate_kappa_schedule()
criterion = torch.nn.MSELoss()
cudnn.benchmark = True
Loss_list = []
# initialize the patch; MCNN requires shape (1, 1, 1024*patch_size, 1024*patch_size)
if args.patch_type == 'circle': # image_size = 1024(default)
patch, mask, patch_shape = init_patch_circle(args.image_size, args.patch_size)
patch_init = patch.copy()
elif args.patch_type == 'square':
patch, patch_shape = init_patch_square(args.image_size, args.patch_size)
patch_init = patch.copy()
mask = np.ones(patch_shape)
for epoch in range(0, args.train_epoch):
adv_tgt_img_var, patch, adv_out_var, mask, patch_shape = train(net, patch, patch_shape, mask, patch_init, data_loader_train, criterion, optimizer, kappa, epoch, output_dir, method, dataset_name, Loss_list)
test(patch, mask, patch_shape, data_loader_val, net)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Patch Attack Parameters')
parser.add_argument('--lr', type=float, default=0.0001, help='the learning rate for the patch optimization')
parser.add_argument('--lr_train_models', type=float, default=0.0001, help='learning rate for model')
parser.add_argument('--patch_type', type=str, default='circle')
parser.add_argument("--patch_size", default=0.08, type=float, help="0.02 | 0.04 | 0.08 | 0.16")
parser.add_argument("--image_size", default=200, type=str, help="this size is for the 9 patch training set")
parser.add_argument("--train_epoch", default=800, type=int, help="the training epochs")
# parser.add_argument("--keep", default=100, type=str, help="randomized ablation parameter")
# parser.add_argument('--max_count', type=int, default='400', help='the max iteration numbers of patch optimization')
parser.add_argument("--alpha", default=0, type=float, help="balance in the attack() loss function")
parser.add_argument("--attack_epoch", default=5, type=int, help='epochs needed for every patch')
args = parser.parse_args()
main()