metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "4po/sploithex",
"score": 3
} |
#### File: 4po/sploithex/hextobin.py
```python
import binascii, sys, re
import sploit
pattern = re.compile(r"\s+")
def show_help():
    print sys.argv[0] + " input.hex output.bin"
    print "\tinput.hex - input file with the hex digits (use - for stdin)"
    print "\toutput.bin - output file for the raw bytes (use - for stdout)"

def hextobin(infile, outfile):
    if infile == "-":
        hexstring = "".join(re.sub(pattern, "", line) for line in sys.stdin)
    else:
        try:
            with open(infile) as hexfile:
                hexstring = "".join(re.sub(pattern, "", line) for line in hexfile)
        except IOError:
            sploit.show_error('Cannot open the input file')
    binstring = binascii.unhexlify(hexstring)
    if outfile == "-":
        sys.stdout.write(binstring)
    else:
        try:
            with open(outfile, "wb") as binfile:
                binfile.write(binstring)
        except IOError:
            sploit.show_error('Cannot write the output file. Check the permissions')

if __name__ == "__main__":
    if len(sys.argv) != 3:
        show_help()
    else:
        hextobin(sys.argv[1], sys.argv[2])
``` |
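The script above boils down to stripping whitespace from the hex text and handing the result to `binascii.unhexlify`. A minimal, self-contained sketch of that core step (independent of the `sploit` helper module, which is not shown here; the sample hex string is made up for illustration, Python 3 shown for the `b'...'` output):

```python
import binascii
import re

# Hex digits may be spread across lines and separated by arbitrary whitespace.
hex_text = "48 65 6c\n6c 6f"
clean = re.sub(r"\s+", "", hex_text)
print(binascii.unhexlify(clean))  # b'Hello'
```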
{
"source": "4qu3l3c4r4/Automation-Test-Knowledge-Base",
"score": 3
} |
#### File: Modules/PageObjects/__init__.py
```python
from SeleniumWrapper import SeleniumWrapper as wrapper
from homepage import HomePage
from signinpage import SignInPage
class PageObjects(HomePage, SignInPage):
def open_browser_to_english_home_page(self):
se = wrapper().connect("127.0.0.1", "4444", "*firefox", "http://www.workopolis.com")
se.start()
se.window_maximize()
h = HomePage()
h.open_english_home_page()
def close_browser_after_run(self):
se = wrapper().connection
se.stop()
```
#### File: webdriver/typed webelements on python/typed_webelement.py
```python
from selenium.webdriver import Firefox
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webelement import WebElement
class Link(WebElement):
def get_href(self):
return self.get_attribute('href')
driver = Firefox()
driver.get('http://ya.ru')
element = driver.find_element(By.XPATH, 'id("mail")/a')
element.__class__ = Link
print element.get_href()
driver.close()
```
#### File: code recipes/implement general wait/wait.py
```python
from functools import wraps
import random
import time
import unittest
from contextlib import contextmanager
def wait(function, expected_condition=None, timeout=None, frequency=None):
"""simple implementation of wait mechanism"""
if not timeout:
timeout = 6
if not frequency:
frequency = 2
if not expected_condition:
def expected_condition(results):
return results
@wraps(function)
def wrapper(*args, **kwargs):
exception = None
results = None
for i in xrange(timeout / frequency):
try:
results = function(*args, **kwargs)
except Exception, e:
exception = e.message
finally:
if results:
if expected_condition:
if expected_condition(results):
break
if timeout / frequency - i < 2:
break
time.sleep(frequency)
if exception:
#todo: make your custom exception
msg = "wrapped function exception {}".format(exception)
raise Exception(msg)
if not results:
#todo: make your custom exception
msg = "not retrieved results exception"
raise Exception(msg)
if results:
if expected_condition:
if not expected_condition(results):
#todo: make your custom exception
msg = "expected condition exception"
raise Exception(msg)
return results
return wrapper
@contextmanager
def assert_timeout_manager(expected_to_not_exceed_in_seconds=1):
start = time.time()
yield
end = time.time()
msg = "elapsed time is {}, but expected {}".format(end - start, expected_to_not_exceed_in_seconds)
assert (end - start) <= expected_to_not_exceed_in_seconds, msg
def assert_timeout(expected_to_not_exceed_in_seconds=1):
def method_decorator(func):
@wraps(func)
def wrapper(self, *argv, **kwargv):
with assert_timeout_manager(expected_to_not_exceed_in_seconds):
results = func(self, *argv, **kwargv)
return results
return wrapper
return method_decorator
class TestWaiter(unittest.TestCase):
default_timeout = 6
default_frequency = 2
@assert_timeout()
def test_wait_success(self):
assert wait(lambda: True)()
def method(self):
time.sleep(1)
return True
def test_wait_success_call_method(self):
class Some(object):
def method(self):
time.sleep(2)
return True
assert wait(self.method)()
assert wait(Some().method)()
@assert_timeout(default_timeout)
def test_wait_with_delayed_success_result(self):
def func():
time.sleep(5)
return True
assert wait(func)()
def test_wait_with_delayed_negative_result(self):
def func():
time.sleep(5)
return False
self.assertRaises(Exception, wait(func))
def test_wait_with_success_result_exceed_timeout(self):
def func():
time.sleep(7)
return True
self.assertRaises(Exception, wait(func))
def test_wait_with_exception_raised(self):
def func():
time.sleep(3)
raise Exception("some internal exception")
self.assertRaises(Exception, wait(func))
def test_wait_set_timeout(self):
timeout = 12
def func():
time.sleep(timeout - 2)
return True
start = time.time()
wait(func, timeout=timeout)()
end = time.time()
assert timeout - 2 <= (end - start) <= timeout
@assert_timeout(default_timeout)
def test_wait_set_frequency(self):
assert wait(lambda: True, frequency=1)()
def test_wait_timeout_frequency(self):
timeout = 12
def func():
time.sleep(timeout - 2)
return True
start = time.time()
wait(func, timeout=timeout, frequency=4)()
end = time.time()
assert timeout - 2 <= (end - start) <= timeout
def test_default_expected_conditions(self):
with assert_timeout_manager():
assert wait(lambda x: str(x))(123) == "123"
with assert_timeout_manager(self.default_timeout):
self.assertRaises(Exception, wait(lambda: []))
with assert_timeout_manager(self.default_timeout):
self.assertRaises(Exception, wait(lambda: 0))
with assert_timeout_manager(self.default_timeout):
self.assertRaises(Exception, wait(lambda: False))
@assert_timeout(default_timeout)
def test_wait_expected_condition_success(self):
assert wait(lambda: [1, 2, 3], expected_condition=lambda x: 1 in x)()
def test_wait_expected_condition_fail(self):
self.assertRaises(Exception, wait(lambda: [1, 2, 3], lambda x: 0 in x))
def test_wait_with_all_arguments(self):
timeout = 20
def func():
return [random.randint(0, 1) for _ in xrange(3)]
def expected_condition(your_results):
#some complex logic
return your_results.count(1) == 3
start = time.time()
results = wait(func, expected_condition, timeout=timeout, frequency=2)()
end = time.time()
assert results == [1, 1, 1]
assert (end - start) <= timeout, "elapsed time {}".format(end - start)
if __name__ == "__main__":
unittest.main(verbosity=2)
```
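As the tests above show, `wait` returns a wrapper that re-invokes the target callable every `frequency` seconds until `expected_condition` accepts the result or the timeout window is exhausted. A small usage sketch against the `wait` defined above (the flaky source below is hypothetical, invented for illustration):

```python
# Hypothetical flaky source: empty on the first call, populated afterwards.
attempts = []

def flaky_source():
    attempts.append(1)
    return [1, 2, 3] if len(attempts) >= 2 else []

# Polls once per second, up to 6 seconds, until three items come back.
results = wait(flaky_source, expected_condition=lambda r: len(r) == 3,
               timeout=6, frequency=1)()
print(results)  # [1, 2, 3], returned on the second poll
```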
#### File: nose/parallel run/test_parallel.py
```python
import os
import time
import logging
from threading import current_thread
from multiprocessing.process import current_process
import testtools
log = logging.getLogger()
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(levelname)s - %(message)s")
handler.setFormatter(formatter)
log.addHandler(handler)
def d(s):
log.debug("{0} {1} {2} {3}".format(current_process().ident, current_thread().ident, time.time(), s))
i = 0 # per process counter
quant = 0.03 # increase if too fast
class FirstTest(testtools.TestCase):
if 'SPLIT' in os.environ:
can_split = os.getenv('SPLIT').lower() in ['true']
_multiprocess_can_split_ = can_split
log.info("Can Split: "+str(can_split))
if 'SHARE' in os.environ:
shared = os.getenv('SHARE').lower() in ['true']
_multiprocess_shared_ = shared
log.info("Shared: "+str(shared))
@classmethod
def setUpClass(cls):
global i
i += 1
log.info("heavy operation is beginning... {0} {1}".format(i, cls.__name__))
time.sleep(100 * quant)
log.info("heavy operation has ended {0} {1}".format(i, cls.__name__))
d("setUpClass {0} {1}".format(i, cls.__name__))
@classmethod
def tearDownClass(cls):
d("tearDownClass " + cls.__name__)
def test_one(self):
time.sleep(3 * quant)
d("1 " + self.__class__.__name__)
def test_two(self):
time.sleep(5 * quant)
d("2 " + self.__class__.__name__)
def test_three(self):
time.sleep(8 * quant)
d("3 " + self.__class__.__name__)
def test_four(self):
time.sleep(4 * quant)
d("4 " + self.__class__.__name__)
def test_five(self):
time.sleep(6 * quant)
d("5 " + self.__class__.__name__)
class SecondTest(FirstTest):
def test_this(self):
time.sleep(7 * quant)
d("_2nd " + self.__class__.__name__)
class ThirdTest(FirstTest):
def test_that(self):
time.sleep(2 * quant)
d("_3rd " + self.__class__.__name__)
``` |
{
"source": "4QuantOSS/OpenDIGITS",
"score": 2
} |
#### File: digits/pretrained_model/test_views.py
```python
import json
import os
import tempfile
import io
import tarfile
from bs4 import BeautifulSoup
import digits.webapp
import digits.dataset.images.classification.test_views
import digits.model.images.classification.test_views
from digits import test_utils
import digits.test_views
# May be too short on a slow system
TIMEOUT_DATASET = 45
TIMEOUT_MODEL = 60
class BaseTestUpload(digits.model.images.classification.test_views.BaseViewsTestWithModel):
"""
Tests uploading Pretrained Models
"""
def test_upload_manual(self):
# job = digits.webapp.scheduler.get_job(self.model_id)
job = digits.webapp.scheduler.get_job(self.model_id)
if job is None:
raise AssertionError('Failed To Create Job')
# Write the stats of the job to json,
# and store in tempfile (for archive)
info = job.json_dict(verbose=False, epoch=-1)
task = job.train_task()
snapshot_filename = task.get_snapshot(-1)
weights_file = open(snapshot_filename, 'r')
model_def_file = open(os.path.join(job.dir(), task.model_file), 'r')
labels_file = open(os.path.join(task.dataset.dir(), info["labels file"]), 'r')
rv = self.app.post(
'/pretrained_models/new',
data={
'weights_file': weights_file,
'model_def_file': model_def_file,
'labels_file': labels_file,
'framework': info['framework'],
'image_type': info["image dimensions"][2],
'resize_mode': info["image resize mode"],
'width': info["image dimensions"][0],
'height': info["image dimensions"][1],
'job_name': 'test_create_pretrained_model_job'
}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 302, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_upload_archive(self):
job = digits.webapp.scheduler.get_job(self.model_id)
if job is None:
raise AssertionError('Failed To Create Job')
info = json.dumps(job.json_dict(verbose=False, epoch=-1), sort_keys=True, indent=4, separators=(',', ': '))
info_io = io.BytesIO()
info_io.write(info)
tmp = tempfile.NamedTemporaryFile()
tf = tarfile.open(fileobj=tmp, mode='w:')
for path, name in job.download_files(-1):
tf.add(path, arcname=name)
tf_info = tarfile.TarInfo("info.json")
tf_info.size = len(info_io.getvalue())
info_io.seek(0)
tf.addfile(tf_info, info_io)
tmp.flush()
tmp.seek(0)
rv = self.app.post(
'/pretrained_models/upload_archive',
data={
'archive': tmp
}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
tmp.close()
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
class TestTorchUpload(BaseTestUpload, test_utils.TorchMixin):
pass
```
#### File: digits/utils/time_filters.py
```python
from __future__ import absolute_import
import time
def print_time(t, ref_time=None):
lt = time.localtime(t)
# ref_time is for testing
if ref_time is None:
now = time.localtime()
else:
now = time.localtime(ref_time)
if lt.tm_year != now.tm_year:
return time.strftime('%b %d %Y, %I:%M:%S %p', lt)
elif lt.tm_mon != now.tm_mon:
return time.strftime('%b %d, %I:%M:%S %p', lt)
elif lt.tm_mday != now.tm_mday:
return time.strftime('%a %b %d, %I:%M:%S %p', lt)
else:
return time.strftime('%I:%M:%S %p', lt)
def print_time_diff(diff):
if diff is None:
return '?'
if diff < 0:
return 'Negative Time'
total_seconds = int(diff)
days = total_seconds // (24 * 3600)
hours = (total_seconds % (24 * 3600)) // 3600
minutes = (total_seconds % 3600) // 60
seconds = total_seconds % 60
def plural(number, name):
return '%d %s%s' % (number, name, '' if number == 1 else 's')
def pair(number1, name1, number2, name2):
if number2 > 0:
return '%s, %s' % (plural(number1, name1), plural(number2, name2))
else:
return '%s' % plural(number1, name1)
if days >= 1:
return pair(days, 'day', hours, 'hour')
elif hours >= 1:
return pair(hours, 'hour', minutes, 'minute')
elif minutes >= 1:
return pair(minutes, 'minute', seconds, 'second')
return plural(seconds, 'second')
def print_time_diff_nosuffixes(diff):
if diff is None:
return '?'
hours, rem = divmod(diff, 3600)
minutes, seconds = divmod(rem, 60)
return '{:02d}:{:02d}:{:02d}'.format(int(hours), int(minutes), int(seconds))
def print_time_since(t):
return print_time_diff(time.time() - t)
```
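For reference, a few illustrative calls against the helpers above (the durations are chosen arbitrarily here):

```python
# Illustrative durations, in seconds.
print(print_time_diff(None))             # '?'
print(print_time_diff(45))               # '45 seconds'
print(print_time_diff(3665))             # '1 hour, 1 minute'
print(print_time_diff(90061))            # '1 day, 1 hour'
print(print_time_diff_nosuffixes(3725))  # '01:02:05'
```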
#### File: examples/siamese/create_db.py
```python
from __future__ import print_function
import argparse
import os
import random
import re
import sys
import time
import lmdb
import numpy as np
import PIL.Image
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
if __name__ == '__main__':
dirname = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(dirname, '..', '..'))
import digits.config # noqa
from digits import utils # noqa
# Import digits.config first to set the path to Caffe
import caffe.io # noqa
import caffe_pb2 # noqa
IMAGE_SIZE = 10
TRAIN_IMAGE_COUNT = 1000
VAL_IMAGE_COUNT = 1000
TEST_IMAGE_COUNT = 10
DB_BATCH_SIZE = 100
def create_lmdbs(folder, file_list, image_count=None, db_batch_size=None):
"""
Creates LMDBs for generic inference
Returns the filename for a test image
Creates these files in "folder":
train_images/
train_labels/
val_images/
val_labels/
mean.binaryproto
test.png
"""
if image_count is None:
train_image_count = TRAIN_IMAGE_COUNT
else:
train_image_count = image_count
val_image_count = VAL_IMAGE_COUNT
if db_batch_size is None:
db_batch_size = DB_BATCH_SIZE
# read file list
images = []
f = open(file_list)
for line in f.readlines():
line = line.strip()
if not line:
continue
path = None
# might contain a numerical label at the end
match = re.match(r'(.*\S)\s+(\d+)$', line)
if match:
path = match.group(1)
ground_truth = int(match.group(2))
images.append([path, ground_truth])
print("Found %d image paths in image list" % len(images))
for phase, image_count in [
('train', train_image_count),
('val', val_image_count)]:
print("Will create %d pairs of %s images" % (image_count, phase))
# create DBs
image_db = lmdb.open(os.path.join(folder, '%s_images' % phase),
map_async=True, max_dbs=0)
label_db = lmdb.open(os.path.join(folder, '%s_labels' % phase),
map_async=True, max_dbs=0)
# add up all images to later create mean image
image_sum = None
shape = None
# save test images (one for each label)
testImagesSameClass = []
testImagesDifferentClass = []
# arrays for image and label batch writing
image_batch = []
label_batch = []
for i in xrange(image_count):
# pick up random indices from image list
index1 = random.randint(0, len(images) - 1)
index2 = random.randint(0, len(images) - 1)
# label=1 if images are from the same class otherwise label=0
label = 1 if int(images[index1][1]) == int(images[index2][1]) else 0
# load images from files
image1 = np.array(utils.image.load_image(images[index1][0]))
image2 = np.array(utils.image.load_image(images[index2][0]))
if not shape:
# initialize image sum for mean image
shape = image1.shape
image_sum = np.zeros((3, shape[0], shape[1]), 'float64')
assert(image1.shape == shape and image2.shape == shape)
# create BGR image: blue channel will contain first image,
# green channel will contain second image
image_pair = np.zeros(image_sum.shape)
image_pair[0] = image1
image_pair[1] = image2
image_sum += image_pair
# save test images on first pass
if label > 0 and len(testImagesSameClass) < TEST_IMAGE_COUNT:
testImagesSameClass.append(image_pair)
if label == 0 and len(testImagesDifferentClass) < TEST_IMAGE_COUNT:
testImagesDifferentClass.append(image_pair)
# encode into Datum object
image = image_pair.astype('uint8')
datum = caffe.io.array_to_datum(image, -1)
image_batch.append([str(i), datum])
# create label Datum
label_datum = caffe_pb2.Datum()
label_datum.channels, label_datum.height, label_datum.width = 1, 1, 1
label_datum.float_data.extend(np.array([label]).flat)
label_batch.append([str(i), label_datum])
if (i % db_batch_size == (db_batch_size - 1)) or (i == image_count - 1):
_write_batch_to_lmdb(image_db, image_batch)
_write_batch_to_lmdb(label_db, label_batch)
image_batch = []
label_batch = []
if i % (image_count / 20) == 0:
print("%d/%d" % (i, image_count))
# close databases
image_db.close()
label_db.close()
# save mean
mean_image = (image_sum / image_count).astype('uint8')
_save_mean(mean_image, os.path.join(folder, '%s_mean.binaryproto' % phase))
_save_mean(mean_image, os.path.join(folder, '%s_mean.png' % phase))
# create test images
for idx, image in enumerate(testImagesSameClass):
_save_image(image, os.path.join(folder, '%s_test_same_class_%d.png' % (phase, idx)))
for idx, image in enumerate(testImagesDifferentClass):
_save_image(image, os.path.join(folder, '%s_test_different_class_%d.png' % (phase, idx)))
return
def _write_batch_to_lmdb(db, batch):
"""
Write a batch of (key,value) to db
"""
try:
with db.begin(write=True) as lmdb_txn:
for key, datum in batch:
lmdb_txn.put(key, datum.SerializeToString())
except lmdb.MapFullError:
# double the map_size
curr_limit = db.info()['map_size']
new_limit = curr_limit * 2
try:
db.set_mapsize(new_limit) # double it
except AttributeError as e:
version = tuple(int(x) for x in lmdb.__version__.split('.'))
if version < (0, 87):
raise ImportError('py-lmdb is out of date (%s vs 0.87)' % lmdb.__version__)
else:
raise e
# try again
_write_batch_to_lmdb(db, batch)
def _save_image(image, filename):
# converting from BGR to RGB
image = image[[2, 1, 0], ...] # channel swap
# convert to (height, width, channels)
image = image.astype('uint8').transpose((1, 2, 0))
image = PIL.Image.fromarray(image)
image.save(filename)
def _save_mean(mean, filename):
"""
Saves mean to file
Arguments:
mean -- the mean as an np.ndarray
filename -- the location to save the image
"""
if filename.endswith('.binaryproto'):
blob = caffe_pb2.BlobProto()
blob.num = 1
blob.channels = mean.shape[0]
blob.height = mean.shape[1]
blob.width = mean.shape[2]
blob.data.extend(mean.astype(float).flat)
with open(filename, 'wb') as outfile:
outfile.write(blob.SerializeToString())
elif filename.endswith(('.jpg', '.jpeg', '.png')):
_save_image(mean, filename)
else:
raise ValueError('unrecognized file extension')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create-LMDB tool - DIGITS')
# Positional arguments
parser.add_argument('folder', help='Where to save the images')
parser.add_argument('file_list', help='File list')
# Optional arguments
parser.add_argument('-c', '--image_count', type=int, help='How many images')
args = vars(parser.parse_args())
if os.path.exists(args['folder']):
print('ERROR: Folder already exists')
sys.exit(1)
else:
os.makedirs(args['folder'])
print('Creating images at "%s" ...' % args['folder'])
start_time = time.time()
create_lmdbs(
args['folder'],
args['file_list'],
image_count=args['image_count'],
)
print('Done after %s seconds' % (time.time() - start_time,))
``` |
{
"source": "4quarks/CMS_Grid_Support",
"score": 2
} |
#### File: CMS_Grid_Support/cms_support/app.py
```python
from cms_support.transfers.transfers_rucio import Transfers
from cms_support.utils.query_utils import Time
from cms_support.utils.transfers_utils import ExcelGenerator
from cms_support.sites.sam3 import SAMSiteStatus
from cms_support.sites.site_readiness import SiteReadiness
from cms_support.utils.site_utils import write_excel
from cms_support.utils.constants import CteSAM
def run_transfers(args):
if not args.days and not args.hours and not args.minutes:
args.days = 1
time_class = Time(days=args.days, hours=args.hours, minutes=args.minutes)
fts = Transfers(time_class, target=args.target)
dict_result = fts.analyze_site(filter_error_kibana=args.error, blacklist_regex=args.black)
generator = ExcelGenerator(dict_result, args.target)
generator.results_to_csv(write_lfns=args.write_lfns)
def run_site_status(args):
if not args.days and not args.hours and not args.minutes:
args.hours = CteSAM.HOURS_RANGE
if not args.black:
args.black = ""
if not args.flavour:
args.flavour = ""
time_ss = Time(days=args.days, hours=args.hours, minutes=args.minutes)
name_ref = "{}_{}".format(args.target, "SiteStatus")
sam3 = SAMSiteStatus(time_ss, target=args.target, blacklist_regex=args.black)
grouped_data = sam3.get_issues_resources()
rows = sam3.get_rows_from_grouped_data(grouped_data)
columns = [CteSAM.REF_TIMESTAMP_HR, CteSAM.REF_SITE, CteSAM.REF_HOST, CteSAM.REF_FLAVOUR, "metric_name",
CteSAM.REF_STATUS, CteSAM.REF_LOG, "details", CteSAM.REF_NUM_ERRORS]
write_excel(rows, columns=columns, name_ref=name_ref)
# time_ss = Time(days=args.days, hours=args.hours, minutes=args.minutes)
# sam = SiteStatus(time_ss, target=args.target, flavour=args.flavour, blacklist_regex=args.black)
# errors = sam.get_issues_resources()
# columns = [CteSAM.REF_TIMESTAMP_HR, CteSAM.REF_SITE, CteSAM.REF_HOST, CteSAM.REF_FLAVOUR, CteSAM.REF_STATUS,
# 'num_row_failures', 'num_failed_tests', 'failed_test', CteSAM.REF_LOG, CteSAM.REF_NUM_ERRORS]
# write_excel(errors, columns=columns, name_ref=name_ref)
def run_site_readiness(args):
if not args.black:
args.black = ""
time_ss = Time(hours=CteSAM.HOURS_RANGE)
sam = SiteReadiness(time_ss, target=args.target, blacklist_regex=args.black)
rows = sam.get_not_enabled_sites(metric=args.metric)
columns = ["name"] + CteSAM.REF_METRICS_SR + ["detail"]
write_excel(rows, columns=columns, name_ref=args.target + "_SiteReadiness")
```
#### File: cms_support/sites/site_readiness.py
```python
from cms_support.utils.constants import CteSAM as CteSAM
from cms_support.utils.site_utils import AbstractCMSSST
import re
class SiteReadiness(AbstractCMSSST):
def __init__(self, time_class, target="", blacklist_regex=""):
super().__init__(time_class, CteSAM.REF_SR_METRIC, target=target, blacklist_regex=blacklist_regex)
@staticmethod
def site_enabled(dict_site, metric):
site_enabled = True
if metric in dict_site.keys():
status = dict_site[metric]
if status != "enabled":
site_enabled = False
return site_enabled
def get_not_enabled_sites(self, metric=""):
rows, metrics = [], CteSAM.REF_METRICS_SR
if metric:
desired_metrics = [sr_m for m in metric.split("|") for sr_m in CteSAM.REF_METRICS_SR if re.search(m, sr_m)]
if desired_metrics:
metrics = desired_metrics
# for site in self.sites_list:
dict_attr = {"name": self.target}
kibana_query_all = self.get_kibana_query(dict_attr=dict_attr)
response_kibana_cmssst = self.get_direct_response(kibana_query=kibana_query_all)
studied_sites = []
for response in reversed(response_kibana_cmssst):
data_response = response["data"]
data_response["life_status"] = data_response.pop("status") # change key to keep structure <metric>_status
site_name = data_response["name"]
is_blacklisted = self.is_blacklisted(site_name, self.blacklist_regex)
if site_name not in studied_sites and not is_blacklisted:
studied_sites.append(site_name)
site_with_issues = False
for metric in metrics:
if not self.site_enabled(data_response, metric):
site_with_issues = True
break
if site_with_issues:
rows.append(data_response)
return rows
if __name__ == "__main__":
from cms_support.utils.query_utils import Time
time_ss = Time(hours=CteSAM.HOURS_RANGE)
sam = SiteReadiness(time_ss, target="T2")
rows = sam.get_not_enabled_sites(metric="prod|life")
columns = ["name"] + CteSAM.REF_METRICS_SR + ["detail"]
# from cms_support.utils.site_utils import write_excel
# write_excel(rows, columns=columns, name_ref="testing" + "SiteReadiness")
# print()
```
#### File: cms_support/transfers/transfers.py
```python
import logging
from utils.constants import CteFTS
import time
from copy import deepcopy
from abc import ABC
import re
from utils.mongotools import MongoDB
import pandas as pd
from utils.query_utils import AbstractQueries, get_lfn_and_short_pfn, group_data, Time, timestamp_to_human_utc
import xlsxwriter
class Transfers(AbstractQueries, ABC):
def __init__(self, time_class):
super().__init__(time_class)
self.index_name = CteFTS.INDEX_ES
self.index_id = CteFTS.INDEX_ES_ID
self.mongo = MongoDB()
def get_mongo_query(self, site="", hostname=""):
############ GET SRM ELEMENTS OF THE SITE ############
mongo_query = {CteFTS.REF_FLAVOUR: "SRM"}
if site:
mongo_query.update({CteFTS.REF_SITE: site})
if hostname:
mongo_query.update({CteFTS.REF_HOST: hostname})
return mongo_query
def is_blacklisted(self, src_url, dest_url):
"""
Check if the pfn contains blacklisted elements
:param src_url: 'gsiftp://eoscmsftp.cern.ch//eos/cms/store/temp/user/cc/.../out_offline_Photons_230.root'
:param dest_url:
:return:
"""
black_pfn = [black_pfn for black_pfn in BLACKLIST_PFN if black_pfn in src_url or black_pfn in dest_url]
return black_pfn
def get_user(self, url_pfn):
user = ""
raw_users = re.findall("/user/(.*)/", str(url_pfn))
if raw_users:
user = raw_users[0].split("/")[0].strip("")
user = user.split(".")[0] # e.g. gbakas.9c1d054d2d278c14ddc228476ff7559c10393d8d
if len(raw_users) > 2:
raise Exception("MULTIPLE USERS ON PFN")
return user
def build_query_get_response(self, hostname, direction="", filter_error_kibana=""):
"""
:param hostname: name of the host to analyze e.g. eoscmsftp.cern.ch
:param direction: source_se or dest_se
:param filter_error_kibana: keyword of the error to find e.g. "No such file"
:return:
"""
############ CONSTRUCT THE QUERY ############
kibana_query_failed = "data.vo:cms AND data.file_state:{} AND data.{}:/.*{}.*/ ".format("FAILED", direction,
hostname)
# If an error is specified --> add filter on the query
if filter_error_kibana:
kibana_query_failed += " AND data.{}:/.*\"{}\".*/".format(CteFTS.REF_LOG, filter_error_kibana)
############ QUERY TO ELASTICSEARCH ############
response_failed = self.get_direct_response(kibana_query=kibana_query_failed, max_results=10000)
return response_failed
def analyze_site(self, site="", hostname="", filter_error_kibana=""):
"""
Get json with all the errors grouped by: host, destination/origin, type of error
"""
all_data = {}
mongo_query = self.get_mongo_query(site, hostname)
list_site_info = self.mongo.find_document(self.mongo.vofeed, mongo_query)
if list_site_info:
hosts_name = [info[CteFTS.REF_HOST] for info in list_site_info if info]
############ ITERATE OVER ALL SRMs HOSTS ############
for hostname in hosts_name:
data_host = {}
############ GET DATA ORIGIN AND DESTINATION ############
for direction in [CteFTS.REF_SE_SRC, CteFTS.REF_SE_DST]:
time.sleep(0.1)
response_kibana = self.build_query_get_response(hostname, direction=direction,
filter_error_kibana=filter_error_kibana)
############ GROUP DATA BY ERRORS ############
grouped_by_error = {}
############ ITERATE OVER ALL ERRORS ############
for error in response_kibana:
error_data = deepcopy(error[CteFTS.REF_DATA])
src_url = error_data[CteFTS.REF_PFN_SRC]
dst_url = error_data[CteFTS.REF_PFN_DST]
############ AVOID ERRORS THAT ARE BLACKLISTED ############
in_blacklist = self.is_blacklisted(src_url, dst_url)
# Extract useful data
if CteFTS.REF_LOG in error_data.keys() and not in_blacklist:
############ ADD EXTRA DATA ############
src_lfn, _ = get_lfn_and_short_pfn(src_url)
dst_lfn, _ = get_lfn_and_short_pfn(dst_url)
timestamp_hr = timestamp_to_human_utc(error_data[CteFTS.REF_TIMESTAMP])
error_data.update({CteFTS.REF_LFN_SRC: src_lfn, CteFTS.REF_LFN_DST: dst_lfn,
CteFTS.REF_TIMESTAMP_HR: timestamp_hr})
# Clean se
error_data[CteFTS.REF_SE_SRC] = error_data[CteFTS.REF_SE_SRC].split("/")[-1]
error_data[CteFTS.REF_SE_DST] = error_data[CteFTS.REF_SE_DST].split("/")[-1]
############ GROUP THE ERROR ############
grouped_by_error = group_data(grouped_by_error, error_data,
[CteFTS.REF_PFN_SRC, CteFTS.REF_PFN_DST], CteFTS)
if grouped_by_error:
data_host.update({direction: grouped_by_error})
all_data.update({hostname: data_host})
return all_data
def get_column_id(self, num_rows, num_columns_ahead=0, num_rows_ahead=0):
letter_column = chr(64 + num_columns_ahead)
structure_id = "{}{}:{}{}".format(letter_column, num_rows_ahead + 1, letter_column,
num_rows + num_rows_ahead + 1)
return structure_id
def get_sub_table(self, dict_grouped_by_id, list_elements):
list_group = []
for group_id, data in dict_grouped_by_id.items():
new_row = []
dict_data = dict((i, data.count(i)) for i in data)
if dict_data:
for element_value in list_elements:
if element_value in dict_data.keys():
new_row.append(dict_data[element_value])
else:
new_row.append(None)
list_group.append([group_id] + new_row)
return list_group
def write_lfn_txt(self, lfns_file_name, lfns):
text = ""
for error, list_lfns in lfns.items():
text += "*" * 30 + "\n"
text += error.capitalize() + "\n"
text += "*" * 30 + "\n"
for lfn in list_lfns:
text += lfn + "\n"
f = open(lfns_file_name + ".txt", "a")
f.write(text)
f.close()
def results_to_csv(self, dict_results, write_lfns=False):
columns = [CteFTS.REF_TIMESTAMP, CteFTS.REF_TIMESTAMP_HR, CteFTS.REF_LOG,
CteFTS.REF_SE_SRC, CteFTS.REF_SE_DST, CteFTS.REF_PFN_SRC, CteFTS.REF_PFN_DST, CteFTS.REF_LFN_SRC,
CteFTS.REF_LFN_DST, CteFTS.REF_NUM_ERRORS, CteFTS.REF_JOB_ID, CteFTS.REF_FILE_ID]
############ ITERATE OVER ALL SRMs HOSTS ############
for storage_element, se_value in dict_results.items():
time_analysis = round(time.time())
host_analysis = storage_element.replace(".", "-")
file_name = '{}_{}'.format(time_analysis, host_analysis)
writer = pd.ExcelWriter(file_name + ".xlsx", engine='xlsxwriter')
############ GET DATA ORIGIN AND DESTINATION ############
for direction, direction_value in se_value.items():
list_errors, list_groups, list_users, list_other_endpoint = [], [], [], []
group_id = 1
users, endpoints, lfns = {}, {}, {}
if "s" == direction[0]:
other_direction = CteFTS.REF_SE_DST
other_url_direction = CteFTS.REF_PFN_DST
other_lfn = CteFTS.REF_LFN_DST
same_url_direction = CteFTS.REF_PFN_SRC
same_lfn = CteFTS.REF_LFN_SRC
else:
other_direction = CteFTS.REF_SE_SRC
other_url_direction = CteFTS.REF_PFN_SRC
same_url_direction = CteFTS.REF_PFN_DST
other_lfn = CteFTS.REF_LFN_SRC
same_lfn = CteFTS.REF_LFN_DST
############ ITERATE OVER ALL ERROR GROUPS ############
for error_key, error_value in direction_value.items():
users.update({group_id: []})
endpoints.update({group_id: []})
lfns.update({error_key: []})
failed_transfers = 0
############ ITERATE OVER ALL ERRORS ############
for single_error in error_value:
# ADD USER IN LIST
user_site = self.get_user(single_error[same_url_direction])
user_other = self.get_user(single_error[other_url_direction])
if user_site:
users[group_id] += [user_site] * single_error[CteFTS.REF_NUM_ERRORS]
if user_other and user_site != user_other:
logging.error("Different users {} vs {}".format(user_site, user_other))
if user_site not in list_users:
list_users.append(user_site)
# ADD ENDPOINT IN LIST
other_endpoint = single_error[other_direction]
endpoints[group_id] += [other_endpoint] * single_error[CteFTS.REF_NUM_ERRORS]
if other_endpoint not in list_other_endpoint:
list_other_endpoint.append(other_endpoint)
# ADD LIST LFNs
if write_lfns and single_error[same_lfn] and single_error[same_lfn] not in lfns[error_key]:
lfns[error_key].append(single_error[same_lfn])
# ADD ALL THE ERROR INFORMATION
values_columns = [single_error[elem] for elem in columns]
values_columns.append(user_site)
values_columns.append(group_id)
# Row errors table
list_errors.append(values_columns)
# Count total of failed transfers for each group
failed_transfers += single_error[CteFTS.REF_NUM_ERRORS]
# Row table (legend) group errors
list_groups.append([group_id, error_key, len(error_value), failed_transfers])
group_id += 1
# WRITE TXT WITH LFNs
if write_lfns:
lfns_file_name = file_name + "_LFNs_{}".format(direction)
self.write_lfn_txt(lfns_file_name, lfns)
# DF ERRORS
columns_errors = columns + [CteFTS.REF_USER, "group_id"]
num_columns_error = len(columns_errors)
df = pd.DataFrame(list_errors, columns=columns_errors)
df.to_excel(writer, sheet_name=direction, index=False)
column_id_error = self.get_column_id(len(list_errors), num_columns_error)
# DF LEGEND GROUPS
columns_groups = ["group_id", "error_ref", "num_diff_errors", "num_failed_transfers"]
start_column = num_columns_error + CteFTS.SEPARATION_COLUMNS
df_group = pd.DataFrame(list_groups, columns=columns_groups)
df_group.to_excel(writer, sheet_name=direction, startcol=start_column, index=False)
column_id_group = self.get_column_id(len(list_groups), start_column + 1)
# DF USERS
list_group_users = self.get_sub_table(users, list_users)
columns_users = ["group_id"] + list_users
start_column = num_columns_error + CteFTS.SEPARATION_COLUMNS
start_row_users = len(list_groups) + CteFTS.SEPARATION_ROWS
if list_group_users:
df_users = pd.DataFrame(list_group_users, columns=columns_users)
df_users.to_excel(writer, sheet_name=direction, startcol=start_column, startrow=start_row_users,
index=False)
# DF ENDPOINTS
list_group_endpoints = self.get_sub_table(endpoints, list_other_endpoint)
columns_endpoints = ["group_id"] + list_other_endpoint
start_column = num_columns_error + CteFTS.SEPARATION_COLUMNS
start_row = start_row_users + len(list_group_users) + CteFTS.SEPARATION_ROWS
if list_group_endpoints:
df_endpoint = pd.DataFrame(list_group_endpoints, columns=columns_endpoints)
df_endpoint.to_excel(writer, sheet_name=direction, startcol=start_column, startrow=start_row,
index=False)
# COLOR SHEET
worksheet = writer.sheets[direction]
worksheet.conditional_format(column_id_error, {'type': '3_color_scale'})
worksheet.conditional_format(column_id_group, {'type': '3_color_scale'})
writer.save()
if __name__ == "__main__":
time_class = Time(hours=24)
fts = Transfers(time_class)
BLACKLIST_PFN = ["se3.itep.ru", "se01.indiacms.res.in", "cmsio.rc.ufl.edu"]
dict_result = fts.analyze_site(site="T2_ES_IFCA")
fts.results_to_csv(dict_result, write_lfns=True)
```
#### File: cms_support/utils/mongotools.py
```python
import pymongo
import os
from pymongo.collation import Collation
import json
from cms_support.utils.query_utils import Time
from cms_support.sites.vofeed import VOFeed
class MongoDB:
def __init__(self):
client = pymongo.MongoClient('mongodb://' + os.environ["MONGODB_HOST"])
# DATABASE
self.db = client.sitesupport
# COLLECTIONS
self.vofeed = self.db.vofeed
@staticmethod
def insert_one_document(collection, dict_to_insert):
collection.insert_one(dict_to_insert)
@staticmethod
def insert_list_documents(collection, list_to_insert, delete_collection=False):
if delete_collection:
collection.drop()
collection.insert(list_to_insert)
@staticmethod
def find_document(collection, query, project=None):
# Collations --> sensitive find cases
if not project:
project = {"_id": 0}
cursor = collection.find(query, project).collation(Collation(locale="en", strength=2))
return [row for row in cursor]
@staticmethod
def find_unique_fields(collection, unique_field):
"""
list of unique fields []
"""
return collection.distinct(unique_field)
@staticmethod
def update_document(collection, find_by, dict_edit):
"""
{'id':'147862'},{"$set":{"date_creation": "2020-07-15 00:00:00"}}
"""
# Collations --> sensitive find cases
query = {"$set": dict_edit}
cursor = collection.update(find_by, query, True)
class SiteInfo:
def __init__(self):
self.mongo = MongoDB()
time = Time(hours=24)
self.vofeed = VOFeed(time)
def write_json_all_resources(self):
algo = self.mongo.find_document(self.mongo.vofeed, {})
with open("sites_resources.json", "w") as outfile:
json.dump(algo, outfile)
def update_mongo_vofeed(self, site_name=""):
all_resources = self.vofeed.get_site_resources("/.*{}.*/".format(site_name))
self.mongo.insert_list_documents(self.mongo.vofeed, all_resources, delete_collection=True)
def get_resource_filtered(self, flavour="", hostname="", site=""):
query = {}
if flavour:
query.update({"flavour": {'$regex': flavour}})
if hostname:
query.update({"hostname": {'$regex': hostname}})
if site:
query.update({"site": {'$regex': site}})
return self.mongo.find_document(self.mongo.vofeed, query)
def get_list(self, field=""):
"""
:param field: site|hostname|flavour
:return:
"""
unique_list = []
if field:
unique_list = self.mongo.find_unique_fields(self.mongo.vofeed, field)
return unique_list
if __name__ == "__main__":
# mongo = MongoDB()
# query_ex = {"qty": 5}
# algo = mongo.find_document(mongo.vofeed, query_ex)
site_info = SiteInfo()
# site_info.update_mongo_vofeed()
print(site_info.write_json_all_resources())
```
#### File: cms_support/utils/site_utils.py
```python
import re
import pandas as pd
import time
from cms_support.utils.constants import logging
from abc import ABC
from cms_support.utils.query_utils import AbstractQueries
from cms_support.utils.constants import CteSAM as CteSAM
def get_type_resource(target):
type_resource = ""
if target:
if re.search("t[0-9]_*", target.lower()):
type_resource = "site"
if re.search("[a-z]\.", target.lower()):
type_resource = "hostname"
if re.search("srm|xrootd|xrd|.*ce.*", target.lower()):
type_resource = "flavour"
return type_resource
def get_resource_from_target(target):
site, host, flavour = "", "", ""
type_resource = get_type_resource(target)
if type_resource == "site":
site = target
elif type_resource == "hostname":
host = target
elif type_resource == "flavour":
flavour = target
return site, host, flavour
def write_excel(rows, columns, name_ref):
if columns and rows:
timestamp_now = round(time.time())
file_name = "{}_{}".format(timestamp_now, name_ref)
writer = pd.ExcelWriter(file_name + ".xlsx", engine='xlsxwriter')
df_group = pd.DataFrame(rows, columns=columns)
df_group.to_excel(writer, index=False)
writer.save()
logging.info("Excel file created successfully")
else:
logging.info("No results found. The Excel file was not created.")
class AbstractCMSSST(AbstractQueries, ABC):
def __init__(self, time_class, metric, str_freq="15min", target="", blacklist_regex=""):
super().__init__(time_class, target, blacklist_regex)
self.index_name = CteSAM.INDEX_ES
self.index_id = CteSAM.INDEX_ES_ID
self.metric = metric
self.str_freq = str_freq
self.basic_kibana_query = "metadata.path:" + self.metric + self.str_freq
@staticmethod
def get_ref_flavour(flavour):
ref_flavour = flavour
if "CE" in flavour:
ref_flavour = "CE"
if "XROOTD" in flavour:
ref_flavour = "XRD"
return ref_flavour
``` |
{
"source": "4R7I5T/cakechat",
"score": 3
} |
#### File: quality/metrics/ranking.py
```python
import numpy as np
from sklearn.metrics import average_precision_score
def compute_average_precision(expected_answers, weighted_actual_answers, top):
actual_responses, actual_weights = zip(*weighted_actual_answers.items())
expected_labels = [int(response in expected_answers) for response in actual_responses][:top]
actual_weights = actual_weights[:top]
if any(expected_labels):
score = average_precision_score(expected_labels, actual_weights)
else:
score = 0.0
return score
def compute_recall_k(expected_answers, weighted_actual_answers, k):
sorted_k_responses = sorted(
weighted_actual_answers.keys(), key=lambda response: weighted_actual_answers[response], reverse=True)[:k]
recall_k = len(set(sorted_k_responses) & set(expected_answers)) / float(len(expected_answers))
return recall_k
def compute_retrieval_metric_mean(metric_func, questions_answers, questions_to_weighted_actual_answers, top_count):
if top_count <= 0:
raise ValueError('top_count should be a natural number')
return np.mean([
metric_func(answers, questions_to_weighted_actual_answers[question], top_count)
for question, answers in questions_answers.items()
])
```
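A small worked example of the retrieval metrics above, with hypothetical responses and weights:

```python
# Hypothetical candidates: weights rank 'hello' first, 'hi' last.
expected = ['hello', 'hi']
actual = {'hello': 0.9, 'bye': 0.5, 'hi': 0.1}

# recall@2 keeps the top-2 responses by weight ('hello', 'bye');
# one of the two expected answers survives, so recall is 0.5.
print(compute_recall_k(expected, actual, k=2))  # 0.5
```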
#### File: cakechat/utils/plotters.py
```python
import os
from collections import Counter
from tensorboard_logger import Logger as TensorboardLogger
from cakechat.utils.files_utils import get_persisted, serialize
class TensorboardMetricsPlotter(object):
def __init__(self, log_dir):
self._log_dir = log_dir
self._writers = {}
self._steps_path = os.path.join(log_dir, 'steps')
self._steps = get_persisted(dict, self._steps_path)
def plot(self, model_name, metric_name, metric_value):
if model_name not in self._writers:
self._writers[model_name] = TensorboardLogger(os.path.join(self._log_dir, model_name))
if model_name not in self._steps:
self._steps[model_name] = Counter()
self._writers[model_name].log_value(metric_name, metric_value, step=self._steps[model_name][metric_name])
self._steps[model_name][metric_name] += 1
serialize(self._steps_path, self._steps)
``` |
{
"source": "4ra5oOs4r4/Loeya",
"score": 3
} |
#### File: Loeya/cogs/UserInfo.py
```python
import datetime
import random
import discord
from discord.ext import commands
from datetime import datetime
# -------------------- Cogs --------------------
class userinfo(commands.Cog):
def __init__(self, client):
self.client = client
# -------------------- Launch --------------------
@commands.Cog.listener()
async def on_ready(self):
print('Loaded User Info')
# -------------------- User Command --------------------
@commands.command(aliases=["userinfo"])
async def user(self, ctx, member : discord.Member=None):
if member == None:
member = ctx.message.author
userid = member.id
date = member.created_at.strftime('[%Y/%b/%d - %X %p]')
joindate = member.joined_at.strftime('[%Y/%b/%d - %X %p]')
pfp = member.avatar_url
embed=discord.Embed(color=discord.Colour.random())
embed.set_author(name=f"{member}'s Info")
embed.set_thumbnail(url=pfp)
embed.add_field(name="🆔 User's ID:", value=userid, inline=False)
embed.add_field(name="📅 User's Creation Date:", value=date, inline=False)
embed.add_field(name="👑 User's Server Join Date:", value=joindate, inline=False)
embed.set_footer(text="Bot is created by: SamTheNoob#8698")
await ctx.send(embed=embed)
# -------------------- Cogs Setup --------------------
def setup(client):
client.add_cog(userinfo(client))
# -------------------- Comment --------------------
```
#### File: 4ra5oOs4r4/Loeya/main.py
```python
from discord.ext.commands.core import command
import datetime
import os
import discord
from itertools import cycle
from discord.ext import commands, tasks
from TOKEN import token_id
import random
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
client = commands.Bot(command_prefix='?', case_insensitive=True, intents = discord.Intents.all())
status = cycle(['ArasoOsara', 'Kids Die Inside'])
activity = cycle([discord.ActivityType.listening, discord.ActivityType.watching])
def is_it_me(ctx):
return ctx.author.id == 9<PASSWORD>
@client.event
async def on_ready():
guild = client.get_guild(939078117362896906)
change_status.start()
print(f"{client.user.name} is now online.")
guild = client.get_guild(939078117362896906)
for invalid_guilds in list(filter(lambda x: x.name.title().startswith('➖〉Room #'), guild.voice_channels)):
if len(invalid_guilds.members) == 0:
await invalid_guilds.delete()
else:
pass
@client.event
async def on_message(message):
RESPONSES = ["Don't mention me!", "Stop mentioning me!", "I won't repeat it, DON'T MENTION ME!!!", "Can you not mention my name?", "I already have a passport, STOP MENTIONING MY NAME!!!", "STFU before I fuck you up, AND DON'T EVER CALL MY NAME AGAIN!"]
CHARACTERS =["Stop spamming characters kid...", "Don't spam characters.", "Can you not?"]
ASKED = ["I asked.", "Your Mom?", "Dogs don't ask, They listen.", "Excuse me? I DID!", f"?ban {message.author.mention} ???", "Don't need permission...", "If you didn't ask, Then why are you listening?", "Who said you were apart of the conversation?!", "You just asked who asked...", "Your lost dad", "100% Not your dad.", "It's not a Q&A."]
for word in message.content.split(" "):
temp_l=""
i=0
max_i = 5
for l in word:
if i >= max_i:
return await message.reply(f'{random.choice(CHARACTERS)}')
if l == temp_l:
i += 1
temp_l = l
print(fuzz.ratio("who asked", message.content))
if message.author.bot:
return
if fuzz.ratio("who asked", message.content) in range(50, 101) or fuzz.ratio("WHO ASKED", message.content) in range(50, 101):
await message.reply(f'{random.choice(ASKED)}')
await client.process_commands(message)
if "WhoAsked" in message.content or "Who Asked" in message.content:
await message.reply(f'{random.choice(ASKED)}')
if client.user in message.mentions or "Loeya" in message.content:
await message.reply(f'{random.choice(RESPONSES)}')
@client.command()
async def ping(ctx):
await ctx.send(f'Pong! I response in {round(client.latency * 1000, 2)} milliseconds!')
@tasks.loop(seconds=10)
async def change_status():
await client.change_presence(activity=discord.Activity(type=(next(activity)), name=(next(status))))
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
client.load_extension(f'cogs.{filename[:-3]}')
client.run(token_id)
``` |
{
"source": "4RandomProgrammer/Python",
"score": 4
} |
#### File: Coursera/ExsAvulsos/Fat.py
```python
def main():
while True:
numero = int(input("Enter a number: "))
if numero < 0:
break
numero = fat(numero)
print(numero)
def fat(numero):
var = 1
while numero > 1:
var = numero * var
numero -= 1
return var
main()
```
#### File: Coursera/Lista 4/Vogais.py
```python
def vogal(vog):
evogal = True
if vog == 'a' or vog == 'A':
return evogal
elif vog == 'e' or vog == 'E':
return evogal
elif vog == 'i' or vog == 'I':
return evogal
elif vog == 'o' or vog == 'O':
return evogal
elif vog == 'u' or vog == 'U':
return evogal
else:
evogal = False
return evogal
```
#### File: Coursera/Lista 6/Maior elemento.py
```python
def maior_elemento(lista):
maior = lista[0]
for i in lista:
if maior < i:
maior = i
return maior
```
#### File: Listas de Python/Lista 2/L02EX03.py
```python
def desiverter(numero):
numero = (numero % 10) * 10 + numero // 10
return numero
#var
DiaEntrega = 0
MesEntrega = 0
DiaValidade = 0
MesValidade = 0
DiaAtual = 0
MesAtual = 0
i = 0
DiaEntrega = int(input())
MesEntrega = int(input())
DiaValidade = int(input())
MesValidade = int(input())
DiaEntrega = desiverter(DiaEntrega)
MesEntrega = desiverter(MesEntrega)
DiaValidade = desiverter(DiaValidade)
MesValidade = desiverter(MesValidade)
MesAtual = MesEntrega
DiaAtual = DiaEntrega + 3
if MesAtual > MesValidade or DiaAtual > DiaValidade:
print(False)
else:
print(True)
``` |
{
"source": "4RCAN3/PyAlgo",
"score": 4
} |
#### File: pyalgo/maths/catalan_numbers.py
```python
def binomial_coeff(n: int, k: int):
if (k > n - k):
k = n - k
result = 1
for i in range (k):
result *= (n - i)
result /= (i + 1)
return result
def catalan(n: int):
'''
Using binomial coefficients
calculating the nth
catalan number
'''
result = binomial_coeff(2 * n, n)
answer = int(result / (n + 1))
return answer
'''
PyAlgo
<NAME>, 2021
'''
```
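A quick sanity check of the closed form C_n = C(2n, n) / (n + 1) used above:

```python
# First six Catalan numbers.
print([catalan(n) for n in range(6)])  # [1, 1, 2, 5, 14, 42]
```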
#### File: pyalgo/maths/gcd.py
```python
def gcd(x: int, y: int):
'''
Calculating GCD of x and y
using Euclid's algorithm
'''
if (x > y):
while (y != 0):
x, y = y, x % y
return x
else:
while (x != 0):
y, x = x, y % x
return y
def lcm(x: int, y: int):
'''
Calculating LCM of x and y
using the formula
LCM * GCD = x * y
'''
result = (x * y) // gcd(x, y)
return result
'''
PyAlgo
<NAME>, 2021
'''
```
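Example values for the two helpers above:

```python
print(gcd(48, 18))  # 6
print(lcm(4, 6))    # 12, since 4 * 6 // gcd(4, 6) == 24 // 2
```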
#### File: pyalgo/maths/gray_code.py
```python
def gray_code(n: int):
if (n <= 0):
return
result = []
result.append("0")
result.append("1")
num1 = 2
while (True):
if (num1 >= 1 << n):
break
for j in range (num1 - 1, -1, -1):
result.append(result[j])
for j in range (num1):
result[j] = "0" + result[j]
for j in range (num1, 2 * num1):
result[j] = "1" + result[j]
num1 = num1 << 1
return result
'''
PyAlgo
<NAME>, 2021
'''
```
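The generator above doubles the list on every pass, mirroring it and prefixing '0'/'1'. For example:

```python
# 2-bit reflected Gray code: consecutive entries differ by exactly one bit.
print(gray_code(2))  # ['00', '01', '11', '10']
```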
#### File: pyalgo/maths/prime.py
```python
def prime(n: int):
'''
Checking if the number has any
factors in the range [2, sqrt(n)]
else it is prime
'''
if (n == 2):
return True
result = True
for i in range (2, int(n ** 0.5)):
if (n % i == 0):
result = False
break
return result
'''
PyAlgo
<NAME>, 2021
'''
```
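A quick check of the trial-division test above over a small range:

```python
print([n for n in range(2, 20) if prime(n)])  # [2, 3, 5, 7, 11, 13, 17, 19]
```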
#### File: pyalgo/maths/totient_function.py
```python
def totient(n: int):
result = n
num = 2
while (num * num <= n):
if (n % num == 0):
while (n % num == 0):
n = int(n / num)
result -= int(result / num)
num += 1
if (n > 1):
result -= int(result / n)
return result
'''
PyAlgo
<NAME>, 2021
'''
```
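For example, Euler's totient of a prime power and of a product of two primes:

```python
print(totient(9))   # 6, the numbers coprime to 9 are 1, 2, 4, 5, 7, 8
print(totient(10))  # 4, the numbers coprime to 10 are 1, 3, 7, 9
```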
#### File: pyalgo/search/binary_search.py
```python
def binary_search(arr: list, start: int, end: int, key):
'''
Binary search implementation
using iterative method
'''
while (start <= end):
mid = int(start + ((end - start) / 2))
if (arr[mid] < key):
start = mid + 1
elif arr[mid] > key:
end = mid - 1
else:
return mid
return -1
'''
PyAlgo
<NAME>, 2021
'''
```
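Usage sketch for the iterative search above; note that `start` and `end` are inclusive indices into an already sorted list:

```python
arr = [1, 3, 5, 7, 9, 11]
print(binary_search(arr, 0, len(arr) - 1, 7))  # 3
print(binary_search(arr, 0, len(arr) - 1, 4))  # -1, key absent
```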
#### File: pyalgo/sort/bubble_sort.py
```python
def bubble_sort(arr: list):
'''
Time Complexity:
- O(n * n) : worst
- O(n) : best
'''
for i in range (len(arr)):
swap = False
for j in range (0, len(arr) - i - 1):
if arr[j] > arr[j + 1]:
arr[j], arr[j + 1] = arr[j + 1], arr[j]
swap = True
if (swap == False):
break
return arr
'''
PyAlgo
<NAME>, 2021
'''
```
#### File: pyalgo/sort/counting_sort.py
```python
def counting_sort(arr: list):
max_element = int(max(arr))
min_element = int(min(arr))
range_elements = max_element - min_element + 1
count_arr = [0 for i in range (range_elements)]
output_arr = [0 for i in range (len(arr))]
for i in range (0, len(arr)):
count_arr[arr[i] - min_element] += 1
for i in range (1, range_elements):
count_arr[i] += count_arr[i - 1]
for i in range (len(arr) - 1, -1, -1):
output_arr[count_arr[arr[i] - min_element] - 1] = arr[i]
count_arr[arr[i] - min_element] -= 1
for i in range (0, len(arr)):
arr[i] = output_arr[i]
return arr
'''
PyAlgo
<NAME>, 2021
'''
```
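A short check of the counting sort above on a small list with duplicates (the input values are arbitrary):

```python
print(counting_sort([4, 2, 2, 8, 3, 3, 1]))  # [1, 2, 2, 3, 3, 4, 8]
```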
#### File: pyalgo/sort/cycle_sort.py
```python
def cycle_sort(arr: list):
writes = 0
for cycleStart in range(0, len(arr) - 1):
item = arr[cycleStart]
pos = cycleStart
for i in range(cycleStart + 1, len(arr)):
if (arr[i] < item):
pos += 1
if (pos == cycleStart):
continue
while (item == arr[pos]):
pos += 1
arr[pos], item = item, arr[pos]
writes += 1
while (pos != cycleStart):
pos = cycleStart
for i in range(cycleStart + 1, len(arr)):
if (arr[i] < item):
pos += 1
while (item == arr[pos]):
pos += 1
arr[pos], item = item, arr[pos]
writes += 1
return arr
'''
PyAlgo
<NAME>
'''
```
#### File: pyalgo/sort/merge_sort.py
```python
def merge_sort(arr: list):
if len(arr) > 1:
mid = len(arr) // 2
L = arr[:mid]
R = arr[mid:]
merge_sort(L)
merge_sort(R)
i, j, k = 0, 0, 0
while (i < len(L) and j < len(R)):
if (L[i] < R[j]):
arr[k] = L[i]
i += 1
else:
arr[k] = R[j]
j += 1
k += 1
while (i < len(L)):
arr[k] = L[i]
i += 1
k += 1
while (j < len(R)):
arr[k] = R[j]
j += 1
k += 1
return arr
'''
PyAlgo
<NAME>, 2021
'''
```
#### File: pyalgo/test/test_exponential_search.py
```python
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import unittest
from search.exponential_search import exponential_search
class TestExponentialsearch(unittest.TestCase):
def test_exponential_search(self):
result = exponential_search([2, 3, 4, 5], 0, 4, 4)
self.assertEqual(result, 2)
if __name__ == "__main__":
unittest.main()
'''
PyAlgo
<NAME>, 2021
'''
```
#### File: pyalgo/test/test_fibonacci.py
```python
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import unittest
from maths.fibonnaci_numbers import fibonacci
class TestFibonacci(unittest.TestCase):
def test_fibonacci(self):
result = fibonacci(6)
self.assertEqual(result, 8)
if __name__ == "__main__":
unittest.main()
'''
PyAlgo
<NAME>, 2021
'''
```
#### File: pyalgo/test/test_prim_mst.py
```python
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import unittest
from graph.mst.prim_mst import *
class TestPrimmst(unittest.TestCase):
def test_prim_mst(self):
g = Graph(5)
g.graph = [ [0, 2, 0, 6, 0],
[2, 0, 3, 8, 5],
[0, 3, 0, 0, 7],
[6, 8, 0, 0, 9],
[0, 5, 7, 9, 0]
]
result = g.prim_mst()
self.assertEqual(result, {(0, 1): 2, (1, 2): 3, (0, 3): 6, (1, 4): 5})
if __name__ == "__main__":
unittest.main()
'''
PyAlgo
<NAME>, 2021
'''
``` |
{
"source": "4RCAN3/Scrapia-World",
"score": 3
} |
#### File: novelProfiler/db/jsHelpScripts.py
```python
class JSHelpScripts:
def openAccordian(self, accordianText: str, tagName: str='span') -> str:
return ''.join(
(
f"let spanList = document.getElementsByTagName('{tagName}');",
"let c = 0;", "for(let span of spanList){",
f"if(span.innerText.startsWith('{accordianText}'))", "{",
"if(c != 0){", "span.click();", "}c++;","}","}"
)
)
def clickElementWithInnerTextS(
self, tagName: str, innerText: str, toLowerCase: bool = True
) -> str:
"""Script to click an element which has innertext `S`"""
def appendLowerCase(originalStr: str) -> str:
if toLowerCase:
originalStr += ".toLowerCase()"
return originalStr
return "".join(
(
"let elementList=document",
f""".getElementsByTagName("{tagName}");""",
"for(let element of elementList){",
appendLowerCase("if(element.innerText"),
f"""=="{innerText}")""",
"{",
"element.click();",
"}",
"}",
)
)
def clickFirstElementFromElementList(self, tagName: str) -> str:
return f"""document.getElementsByTagName("{tagName}")[0].click();"""
def clickElementStartingWithStrS(self, tagName: str, startingTxt: str) -> str:
return "".join(
(
"let elementList=document",
f""".getElementsByTagName("{tagName}");""",
"for(let element of elementList){",
f"""if(element.innerText.startsWith("{startingTxt}"))""",
"{element.click();}",
"}",
)
)
def clickFrmSecElementStartingWithStrS(self, tagName: str, startingTxt: str) -> str:
return "".join(
(
"let elementList=document",
f""".getElementsByTagName("{tagName}");""",
"for(let element of elementList){",
f"""if(element.innerText.startsWith("{startingTxt}"))""",
"{",
"if(element)",
"element.click();}",
"}",
)
)
def getXpathStrFrClsNames(self, tagName: str, *styleClassNames: str) -> str:
returnStr = f"//{tagName}["
repeatingStr = lambda s: f"contains(@class, '{s}')"
numOfClasses = len(styleClassNames)
c = 0
while c < numOfClasses - 1:
returnStr += repeatingStr(styleClassNames[c]) + " and "
c += 1
returnStr += f"{repeatingStr(styleClassNames[c])}]"
return returnStr
def convert2UglyJS(self, jsScript: str) -> str:
"""Convert a proper javascript script into an ugly script with no spaces"""
symbols = jsScript.replace("\n", " ").split(" ")
symbols = [symbol for symbol in symbols if symbol not in (" ", "")]
js = ""
n = len(symbols) - 1
for i in range(0, n):
currSymbol = symbols[i]
nextSymbol = symbols[i + 1]
if (not currSymbol[-1].isalpha()) or (not nextSymbol.isalpha()):
js += currSymbol + nextSymbol
else:
js += f"{currSymbol} {nextSymbol}"
return js
``` |
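For instance, the XPath builder above joins one `contains(@class, ...)` clause per class name; the tag and class names below are made up for illustration:

```python
helper = JSHelpScripts()
print(helper.getXpathStrFrClsNames("div", "chapter", "title"))
# //div[contains(@class, 'chapter') and contains(@class, 'title')]
```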
{
"source": "4RCAN3/Tweetle",
"score": 3
} |
#### File: tweetle/packages/ProjPySQL.py
```python
import mysql.connector
from datetime import datetime
import tweetle.packages.commands as commands
#Establishing connection and creating cursor
class db():
def __init__(self, user):
sql_user = commands.Commands(user).read_accs()[4]
pw = commands.Commands(user).read_accs()[5]
self.mydb = mysql.connector.connect(host="localhost", user= sql_user,password=pw, port = 3306)
mycursor = self.mydb.cursor(buffered=True)
try:
mycursor.execute("USE tweepy")
except mysql.connector.Error as err:
mycursor.execute("CREATE DATABASE tweepy")
mycursor.execute("USE tweepy")
try:
mycursor.execute("SELECT * FROM TweetDB")
except mysql.connector.Error as err:
self.create(mycursor)
mycursor.close()
#Table creation
def create(self, mycursor):
print('here')
mycursor.execute("CREATE Table TweetDB (SrlNo int NOT NULL AUTO_INCREMENT PRIMARY KEY, TweetID bigint, Keyword varchar(500), _Timestamp datetime, Author varchar(50))")
self.mydb.commit()
#Data insertion
def Insert_Data(self, TweetList, keyword):
mycursor = self.mydb.cursor(buffered=True)
insert_query = "INSERT INTO TweetDB (TweetID,Keyword,_Timestamp,Author) VALUES(%s,%s,%s,%s)"
records = (TweetList['id'], keyword, TweetList['timestamp'], TweetList['tweet_author'])
mycursor.execute(insert_query, records)
self.mydb.commit()
mycursor.close()
#Queries
#Ordering by timestamp
def orderbytime(self):
mycursor = self.mydb.cursor(buffered=True)
mycursor.execute("SELECT * FROM TweetDB ORDER BY _Timestamp ASC")
self.mydb.commit()
res = mycursor.fetchall()
mycursor.close()
return res
#Fetching top x rows
def selecttop(self, x):
mycursor = self.mydb.cursor(buffered=True)
query = '''SELECT * FROM TweetDB
LIMIT {}'''.format(x)
mycursor.execute(query)
self.mydb.commit()
res = mycursor.fetchall()
mycursor.close()
return res
#Emptying the whole table
def clean(self):
mycursor = self.mydb.cursor()
mycursor.execute("TRUNCATE TweetDB")
self.mydb.commit()
#Returning xth row
def row(self, x):
mycursor = self.mydb.cursor(buffered=True)
query = '''SELECT * FROM TweetDB
LIMIT {}'''.format(x)
mycursor.execute(query)
self.mydb.commit()
res = mycursor.fetchall()
res = res[len(res) - 1]
mycursor.close()
return res
def all_data(self):
mycursor = self.mydb.cursor()
mycursor.execute("SELECT * FROM TweetDB")
res = mycursor.fetchall()
mycursor.close()
return res
``` |
{
"source": "4rchib4ld/karton-unpacker-modules",
"score": 2
} |
#### File: karton-unpacker-modules/iceLoader/iceLoader.py
```python
import logging
import argparse
import json
from karton.core import Task, Resource
from qiling import *
import yara
import pefile
import hexdump
__author__ = '4rchib4ld'
__version__ = '1.0.0'
yara_rule_iceloader = """
rule iceloaderpacker {
meta:
author = "4rchib4ld"
description = "Iceloader"
reference = "https://4rchib4ld.github.io/blog/HoneymoonOnIceloader/"
type = "malware.loader"
created = "2021-05-14"
os = "windows"
tlp = "white"
rev = 1
strings:
$obfuscationCode = {89 DA [0-7] B? FF 44 30 [0-17] C2 44 30 [0-8] 20 ?? 08 D0 [0-8] 88 84} // This code is used for deobfuscation
condition:
uint16(0) == 0x5a4d and filesize < 800KB and // We only want PE files
all of them
}
"""
log = logging.getLogger(__name__) # Setup Logging
class KartonUnpackerModule():
"""
Unpacks IceLoader executables using the Qiling Framework
"""
def __init__(self, sample, config):
self.enabled = self.yara_check(sample)
self.config = config
self.data = sample.content
if self.config['rootfs'] is None:
log.warn("rootfs is disabled, iceloader unpacker disabled")
self.enabled = False
if self.config['debug'] is True:
logging.basicConfig(level=logging.DEBUG)
self.verbose = 4
else:
logging.basicConfig(level=logging.INFO)
self.verbose = 0
def yara_check(self, sample) -> bool:
"""
Checking if the sample matches the yara rule. If it does, get the code used for encryption
"""
self.data = sample.content
self.name = sample.name
yarac = yara.compile(source=yara_rule_iceloader)
matches = yarac.match(data=self.data)
if matches:
start = int(matches[0].strings[0][0])
end = start + len(matches[0].strings[0][2])
            self.obfuscationCode = sample.content[start:end-2] # Removing the last two bytes because I use them as a marker for the end of the code
return True
return False
def extractPayloadFromSect(self, pe, sectionName):
"""
        Extract the payload from the PE section. Different routines are needed because different sections can be used
"""
for section in pe.sections:
if sectionName == section.Name:
if ".rdata" in str(sectionName):
startOfDebugDirectory = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_DEBUG"]].VirtualAddress
rdata = section.get_data()
RdataVirtualAddress = section.VirtualAddress
endOfPayload = startOfDebugDirectory - RdataVirtualAddress
return rdata[:endOfPayload]
data = section.get_data()
log.debug(f"Size of extracted payload section : {len(data)}")
return data
def extractDecryptionSect(self, pe, sectionName):
"""
Extracting the payload from the pe section
"""
for section in pe.sections:
if sectionName == section.Name:
data = section.get_data()
endoffset = 16400 # hardcoded value, but it's always the same
extractedValue = int.from_bytes(data[:4], 'little')
data = data[16:endoffset]
log.debug(f"Size of the extracted decryption section : {len(data)}\nExtracted value : {extractedValue}")
return data, extractedValue
def payloadDecode(self, data):
"""
Decoding the payload. Making it ready for the next stage
"""
decodedData = bytearray()
for i in range(0, len(data), 2):
decodedData.append(data[i])
log.debug(f"Size decoded payload section: {len(decodedData)}")
return decodedData
def payloadDecrypt(self, decodedPayload, decrementationCounter):
"""
        Start from the end of decodedPayload and take a byte every n bytes. Then loop again, this time starting one byte earlier, and so on
"""
payload = bytearray()
count = 0
scount = 0
payloadSize = len(decodedPayload) - 1
i = payloadSize
while scount != decrementationCounter:
try:
payload.append(decodedPayload[i])
except:
pass
i -= decrementationCounter
count = count + 1
if count == 512:
count = 0
scount += 1
i = payloadSize - scount
log.debug(f"Size of the decrypted payload section : {len(payload)}")
return payload[::-1]
    def gettingObfuscationCode(self, file, yaraRule):
"""
Retrieving the code used for obfuscation using a Yara rule
"""
rules = yara.compile(filepath=yaraRule)
f = open(file, "rb")
matches = rules.match(data=f.read())
f.close()
if matches:
obfuscationCode = matches[0].strings[0][2]
else:
obfuscationCode = 0
log.debug(f"Obfuscation code : {obfuscationCode}")
return obfuscationCode
def runObfuscationCode(self, obfuscatedPayload):
"""
Treat the obfuscation code as a shellcode (could have used the code offset instead) and run it in a loop
"""
ql = Qiling(code=self.obfuscationCode,
rootfs=self.config['rootfs'] + '/x8664_windows',
ostype="windows",
archtype="x8664",
multithread=False,
console=False,
verbose=self.verbose
)
try:
deobfuscatedPayload = bytearray()
count = 0
key = 1 # Maybe this will change ?
for byte in obfuscatedPayload:
if count == 512:
key += 1
count = 0
# initialize machine registers
ql.reg.al = byte
ql.reg.bl = key
# Running the code
ql.run()
# Extracting the value
result = ql.reg.al
count += 1
deobfuscatedPayload.append(result)
except Exception as error:
log.error(error)
log.debug(f"Size of deobfuscated payload : {len(deobfuscatedPayload)}")
return deobfuscatedPayload[::1]
def decryptSecondStage(self, encryptedPayload, dataSect):
"""
        The final decryption. Loop through the data section two bytes at a time, combine each pair (little-endian) into an index, and fetch the corresponding char of the decrypted payload from .rdata
"""
secondStage = bytearray()
count = 0
step = 512
padding = 0
for i in range(0, len(encryptedPayload) * 2, 2):
try:
currentChar = encryptedPayload[int.from_bytes(bytes([dataSect[i % len(dataSect)]]) + bytes([dataSect[(i+1) % len(dataSect)]]), "little") + padding]
secondStage.append(currentChar)
except IndexError:
pass
count += 1
if count == step:
padding += step
count = 0
secondStage = bytes(secondStage) # Bytearray -> bytes, needed by Karton
log.debug(f"Size of the decrypted second stage : {len(secondStage)}")
return secondStage
def selectingSections(self, pe):
"""
        Sort the sections of the file by size: the biggest holds the packed executable, the second biggest holds the data used for decryption
"""
sort = {}
for section in pe.sections:
if not ".text" in str(section.Name) and not ".data" in str(section.Name): # we don't care about .text
sort[section.Name] = section.Misc_VirtualSize
if ".data" in str(section.Name):
dataSectionSize = section.Misc_VirtualSize
dataSectionName = section.Name
sortedSection = sorted(sort.items(), key=lambda x: x[1], reverse=True)
payloadSection = sortedSection[0][0]
payloadSectionSize = sortedSection[0][1]
log.debug(f"Biggest section is : {payloadSection} with size {payloadSectionSize}")
if dataSectionSize > (payloadSectionSize * 5): #means that everything is in .data
log.debug("Everything is in .data")
dataSect = self.extractPayloadFromSect(pe, dataSectionName)
extractedPayload, extractedDecryptionSection, extractedValue = self.scanningData(dataSect)
else:
extractedPayload = self.extractPayloadFromSect(pe, payloadSection)
extractedDecryptionSection, extractedValue = self.extractDecryptionSect(pe, dataSectionName)
return extractedPayload, extractedDecryptionSection, extractedValue
def scanningData(self, data):
"""
        Sometimes everything is in the .data section, so we need to parse it in order to get the data we want. I use a Yara rule to find the marker
"""
markorYara = """
rule findMarkor
{
strings:
$markor = { 00 ?? ?? 00 ?? ?? 00 00 00 00 00 00 00 00 00 }
condition:
all of them
}
"""
yarac = yara.compile(source=markorYara)
matches = yarac.match(data=data)
extractedValue = int.from_bytes(matches[0].strings[0][2][:4], 'little')
offset = matches[0].strings[0][0]
payload = data[:offset]
        dataSect = data[offset+16:offset+16400] # skipping the 16 bytes that are used as a delimiter
log.debug(f"extracted payload size : {payload}\n extracted data section size : {dataSect} \n extracted value : {extractedValue}")
return payload, dataSect, extractedValue
def main(self) -> list:
# Perform Operations on self.data to unpack the sample
pe = pefile.PE(data = self.data)
extractedPayload, extractedDecryptionSection, extractedValue = self.selectingSections(pe)
decrementationCounter = extractedValue // 512 # that's how it is calculated
obfuscatedPayload = self.payloadDecrypt(self.payloadDecode(extractedPayload), decrementationCounter)
deobfuscatedPayload = self.runObfuscationCode(obfuscatedPayload)
unpackedExecutable = self.decryptSecondStage(deobfuscatedPayload, extractedDecryptionSection)
task = Task(
headers={
'type': 'sample',
'kind': 'runnable',
'stage': 'recognized'
},
payload={
'parent': Resource(name='sample', content=self.data), # Set Parent Data (Packed Sample)
'sample': Resource(name='unpacked', content=unpackedExecutable) # Set Child Data (Unpacked Sample)
}
)
# A list of tasks must be returned, as there can be more than one unpacked child
return [task]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='example.py',
description=f'Karton Unpacker Service Example Module v{__version__} (CLI Test Utility)',
epilog=f'Author: {__author__}'
)
parser.add_argument('-i','--input', help='Input File', type=str, required=True)
parser.add_argument('--rootfs', help='RootFS', type=str, default=None, required=True)
parser.add_argument('--debug', help='Debug', action="store_true", default=False, required=False)
args = parser.parse_args()
f = open(args.input, 'rb')
sample = Resource(name=args.input, content=f.read())
f.close()
config = {
'rootfs': args.rootfs,
'debug': args.debug
}
module = KartonUnpackerModule(sample, config)
if module.enabled is True:
task = module.main()
data = json.loads(str(task))
print(json.dumps(data, indent=4))
``` |
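The `__main__` block above already doubles as a CLI test harness; for reference, the same flow can be driven programmatically as sketched below. `sample.bin` and `./rootfs` are placeholders for a packed IceLoader sample and a Qiling rootfs checkout.
```python
from karton.core import Resource

# Placeholder paths: a packed IceLoader sample and a Qiling rootfs checkout.
with open('sample.bin', 'rb') as f:
    packed = Resource(name='sample.bin', content=f.read())

module = KartonUnpackerModule(packed, {'rootfs': './rootfs', 'debug': False})
if module.enabled:
    tasks = module.main()
    unpacked = tasks[0].payload['sample'].content  # bytes of the unpacked second stage
    with open('unpacked.bin', 'wb') as out:
        out.write(unpacked)
```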
{
"source": "4rdparty/my_examples",
"score": 2
} |
#### File: 4rdparty/my_examples/repos.bzl
```python
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
# libuv
########################################################################
all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""
def repos(external = True, repo_mapping = {}):
if "rules_foreign_cc" not in native.existing_rules():
http_archive(
name = "rules_foreign_cc",
url = "https://github.com/bazelbuild/rules_foreign_cc/archive/0.2.0.tar.gz",
sha256 = "d54742ffbdc6924f222d2179f0e10e911c5c659c4ae74158e9fe827aad862ac6",
strip_prefix = "rules_foreign_cc-0.2.0",
repo_mapping = repo_mapping,
)
if external and "com_github_3rdparty_bazel_rules_leveldb" not in native.existing_rules():
git_repository(
name = "com_github_3rdparty_bazel_rules_leveldb",
remote = "https://github.com/3rdparty/bazel-rules-leveldb",
commit = "a735c49c87f239b18ccbe7cfcfcd620566106202",
shallow_since = "1620093785 -0700",
repo_mapping = repo_mapping,
)
if external and "libuv" not in native.existing_rules():
http_archive(
name = "libuv",
build_file_content = all_content,
sha256 = "371e5419708f6aaeb8656671f89400b92a9bba6443369af1bb70bcd6e4b3c764",
strip_prefix = "libuv-1.42.0",
url = "https://github.com/libuv/libuv/archive/refs/tags/v1.42.0.tar.gz",
repo_mapping = repo_mapping,
)
if external and "libcurl" not in native.existing_rules():
http_archive(
name = "libcurl",
build_file_content = all_content,
sha256 = "ffa8f79f68dd77b08987ce16acd1f292875df8ab3bf7e3654f98d62b445ebd9a",
strip_prefix = "curl-curl-7_78_0",
url = "https://github.com/curl/curl/archive/refs/tags/curl-7_78_0.tar.gz",
repo_mapping = repo_mapping,
)
if external and "com_github_gflags_gflags" not in native.existing_rules():
http_archive(
name = "com_github_gflags_gflags",
url = "https://github.com/gflags/gflags/archive/v2.2.2.tar.gz",
sha256 = "34af2f15cf7367513b352bdcd2493ab14ce43692d2dcd9dfc499492966c64dcf",
strip_prefix = "gflags-2.2.2",
repo_mapping = repo_mapping,
)
if external and "com_github_google_glog" not in native.existing_rules():
http_archive(
name = "com_github_google_glog",
sha256 = "eede71f28371bf39aa69b45de23b329d37214016e2055269b3b5e7cfd40b59f5",
strip_prefix = "glog-0.5.0",
url = "https://github.com/google/glog/archive/refs/tags/v0.5.0.tar.gz",
repo_mapping = repo_mapping,
)
if external and "gtest" not in native.existing_rules():
http_archive(
name = "gtest",
url = "https://github.com/google/googletest/archive/release-1.10.0.tar.gz",
sha256 = "9dc9157a9a1551ec7a7e43daea9a694a0bb5fb8bec81235d8a1e6ef64c716dcb",
strip_prefix = "googletest-release-1.10.0",
repo_mapping = repo_mapping,
)
``` |
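For completeness, a sketch of how a macro like `repos()` is typically consumed from the repository's own WORKSPACE file (Starlark, fenced as `python` to match the convention used for `repos.bzl` above).
```python
# WORKSPACE (sketch)
load("//:repos.bzl", "repos")

# Fetches rules_foreign_cc, the leveldb rules, libuv, libcurl, gflags, glog and gtest
# as declared above; pass external = False when those archives are provided elsewhere.
repos(external = True)
```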
{
"source": "4refr0nt/iot-manager-demo",
"score": 2
} |
#### File: python/sensor/la15.py
```python
import json
import threading
# import os
frame = "STEEL" # BLACK_METAL METAL SHINY_METAL BRASS STEEL CHROME GOLD ANTHRACITE TILTED_GRAY TILTED_BLACK GLOSSY_METAL
color = "RAITH" # RED GREEN BLUE ORANGE YELLOW CYAN MAGENTA WHITE GRAY BLACK RAITH GREEN_LCD JUG_GREEN
bgColor = "CARBON" # DARK_GRAY SATIN_GRAY LIGHT_GRAY WHITE BLACK BEIGE BROWN RED GREEN BLUE ANTHRACITE MUD PUNCHED_SHEET CARBON STAINLESS BRUSHED_METAL BRUSHED_STAINLESS TURNED
lcd = "BLUE_BLUE" # BEIGE BLUE ORANGE RED YELLOW WHITE GRAY BLACK GREEN BLUE2 BLUE_BLACK BLUE_DARKBLUE BLUE_GRAY STANDARD STANDARD_GREEN BLUE_BLUE RED_DARKRED DARKBLUE LILA BLACKRED DARKGREEN AMBER LIGHTBLUE SECTIONS
led = "RED_LED" # RED_LED GREEN_LED BLUE_LED ORANGE_LED YELLOW_LED CYAN_LED
class LoadAvg15:
counter = 0
t = 0
config = {
'descr' : "Load average",
'widget' : "steel",
'widgetConfig' : {
'titleString' : "Load average 15 min",
'unitString' : "%",
'width' : "auto",
'height' : 100,
'type' : "Linear",
'lcdVisible' : True,
'ledVisible' : True,
'lcdDecimals' : 0,
'FrameDesign' : frame,
'ColorDef' : color,
'BackgroundColor': bgColor,
'LcdColor' : lcd,
'LedColor' : led,
'minMeasuredValueVisible' : True,
'maxMeasuredValueVisible' : True,
'threshold' : 50,
'minValue' : 0,
'maxValue' : 100,
}
}
def __init__(self, client, prefix, deviceID, widgetID, pageId, page):
self.client = client
self.prefix = prefix
self.deviceID = deviceID
self.config['id'] = widgetID
self.config['topic'] = prefix + "/" + deviceID + "/la15"
self.config['pageId'] = pageId
self.config['page'] = page
self.t = threading.Timer(10.0, self.send)
self.t.start()
def send_config(self):
print('Publish config:'+ json.dumps(self.config))
self.client.publish( self.prefix + "/" + self.deviceID + '/config', json.dumps(self.config))
self.counter = 5 * 60 / 10 # 5 min send
self.send()
def send(self):
self.t.cancel()
self.t = threading.Timer(10.0, self.send)
self.t.start()
        if self.counter > 0:
            self.counter -= 1  # stop publishing once the 5-minute window after the last config request has elapsed
            topic = self.prefix + "/" + self.deviceID + "/la15/status"
            val = float(self.get()[2]) * 100
            print('Publish ' + topic + ':' + str(val))
            self.client.publish(topic, json.dumps({ 'status': val}) )
def get(self):
with open('/proc/loadavg') as f:
loadavg = f.readlines()
return str(loadavg[0]).replace('\n', '').split(' ')
``` |
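A short sketch of wiring `LoadAvg15` to an MQTT broker with paho-mqtt. The broker address, topic prefix, device ID and page values are placeholders; only the constructor signature is taken from `__init__` above.
```python
import paho.mqtt.client as mqtt

client = mqtt.Client()               # paho-mqtt 1.x style constructor
client.connect('localhost', 1883)    # placeholder broker host/port
client.loop_start()

sensor = LoadAvg15(client,
                   prefix='/IoTmanager',   # placeholder topic prefix
                   deviceID='dev01',       # placeholder device id
                   widgetID='w1',
                   pageId=1,
                   page='System')
sensor.send_config()  # publishes the widget config, then values every 10 seconds
```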
{
"source": "4restwilliams/isce2",
"score": 2
} |
#### File: isceobj/Alos2burstProc/Alos2burstProc.py
```python
import os
import logging
import logging.config
from iscesys.Component.Component import Component
from iscesys.DateTimeUtil.DateTimeUtil import DateTimeUtil as DTU
from iscesys.Compatibility import Compatibility
MASTER_DATE = Component.Parameter('masterDate',
public_name='master date',
default=None,
type=str,
mandatory=True,
                                  doc='master acquisition date')
SLAVE_DATE = Component.Parameter('slaveDate',
public_name='slave date',
default=None,
type=str,
mandatory=True,
                                  doc='slave acquisition date')
MODE_COMBINATION = Component.Parameter('modeCombination',
public_name='mode combination',
default=None,
type=int,
mandatory=True,
doc='mode combination')
MASTER_FRAMES = Component.Parameter('masterFrames',
public_name = 'master frames',
default = None,
type=str,
container=list,
mandatory=False,
doc = 'master frames to process')
SLAVE_FRAMES = Component.Parameter('slaveFrames',
public_name = 'slave frames',
default = None,
type=str,
container=list,
mandatory=False,
doc = 'slave frames to process')
STARTING_SWATH = Component.Parameter('startingSwath',
public_name='starting swath',
default=1,
type=int,
mandatory=False,
doc="starting swath to process")
ENDING_SWATH = Component.Parameter('endingSwath',
public_name='ending swath',
default=5,
type=int,
mandatory=False,
doc="ending swath to process")
BURST_UNSYNCHRONIZED_TIME = Component.Parameter('burstUnsynchronizedTime',
public_name = 'burst unsynchronized time',
default = None,
type = float,
mandatory = False,
doc = 'burst unsynchronized time in second')
BURST_SYNCHRONIZATION = Component.Parameter('burstSynchronization',
public_name = 'burst synchronization',
default = None,
type = float,
mandatory = False,
doc = 'average burst synchronization of all swaths and frames in percentage')
RANGE_RESIDUAL_OFFSET_CC = Component.Parameter('rangeResidualOffsetCc',
public_name = 'range residual offset estimated by cross correlation',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'range residual offset estimated by cross correlation')
AZIMUTH_RESIDUAL_OFFSET_CC = Component.Parameter('azimuthResidualOffsetCc',
public_name = 'azimuth residual offset estimated by cross correlation',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'azimuth residual offset estimated by cross correlation')
RANGE_RESIDUAL_OFFSET_SD = Component.Parameter('rangeResidualOffsetSd',
public_name = 'range residual offset estimated by spectral diversity',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'range residual offset estimated by spectral diversity')
AZIMUTH_RESIDUAL_OFFSET_SD = Component.Parameter('azimuthResidualOffsetSd',
public_name = 'azimuth residual offset estimated by spectral diversity',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'azimuth residual offset estimated by spectral diversity')
SWATH_RANGE_OFFSET_GEOMETRICAL_MASTER = Component.Parameter('swathRangeOffsetGeometricalMaster',
public_name = 'swath range offset from geometry master',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'swath range offset from geometry master')
SWATH_AZIMUTH_OFFSET_GEOMETRICAL_MASTER = Component.Parameter('swathAzimuthOffsetGeometricalMaster',
public_name = 'swath azimuth offset from geometry master',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'swath azimuth offset from geometry master')
SWATH_RANGE_OFFSET_MATCHING_MASTER = Component.Parameter('swathRangeOffsetMatchingMaster',
public_name = 'swath range offset from matching master',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'swath range offset from matching master')
SWATH_AZIMUTH_OFFSET_MATCHING_MASTER = Component.Parameter('swathAzimuthOffsetMatchingMaster',
public_name = 'swath azimuth offset from matching master',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'swath azimuth offset from matching master')
SWATH_RANGE_OFFSET_GEOMETRICAL_SLAVE = Component.Parameter('swathRangeOffsetGeometricalSlave',
public_name = 'swath range offset from geometry slave',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'swath range offset from geometry slave')
SWATH_AZIMUTH_OFFSET_GEOMETRICAL_SLAVE = Component.Parameter('swathAzimuthOffsetGeometricalSlave',
public_name = 'swath azimuth offset from geometry slave',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'swath azimuth offset from geometry slave')
SWATH_RANGE_OFFSET_MATCHING_SLAVE = Component.Parameter('swathRangeOffsetMatchingSlave',
public_name = 'swath range offset from matching slave',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'swath range offset from matching slave')
SWATH_AZIMUTH_OFFSET_MATCHING_SLAVE = Component.Parameter('swathAzimuthOffsetMatchingSlave',
public_name = 'swath azimuth offset from matching slave',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'swath azimuth offset from matching slave')
FRAME_RANGE_OFFSET_GEOMETRICAL_MASTER = Component.Parameter('frameRangeOffsetGeometricalMaster',
public_name = 'frame range offset from geometry master',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'frame range offset from geometry master')
FRAME_AZIMUTH_OFFSET_GEOMETRICAL_MASTER = Component.Parameter('frameAzimuthOffsetGeometricalMaster',
public_name = 'frame azimuth offset from geometry master',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'frame azimuth offset from geometry master')
FRAME_RANGE_OFFSET_MATCHING_MASTER = Component.Parameter('frameRangeOffsetMatchingMaster',
public_name = 'frame range offset from matching master',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'frame range offset from matching master')
FRAME_AZIMUTH_OFFSET_MATCHING_MASTER = Component.Parameter('frameAzimuthOffsetMatchingMaster',
public_name = 'frame azimuth offset from matching master',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'frame azimuth offset from matching master')
FRAME_RANGE_OFFSET_GEOMETRICAL_SLAVE = Component.Parameter('frameRangeOffsetGeometricalSlave',
public_name = 'frame range offset from geometry slave',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'frame range offset from geometry slave')
FRAME_AZIMUTH_OFFSET_GEOMETRICAL_SLAVE = Component.Parameter('frameAzimuthOffsetGeometricalSlave',
public_name = 'frame azimuth offset from geometry slave',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'frame azimuth offset from geometry slave')
FRAME_RANGE_OFFSET_MATCHING_SLAVE = Component.Parameter('frameRangeOffsetMatchingSlave',
public_name = 'frame range offset from matching slave',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'frame range offset from matching slave')
FRAME_AZIMUTH_OFFSET_MATCHING_SLAVE = Component.Parameter('frameAzimuthOffsetMatchingSlave',
public_name = 'frame azimuth offset from matching slave',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'frame azimuth offset from matching slave')
NUMBER_RANGE_LOOKS1 = Component.Parameter('numberRangeLooks1',
public_name='number of range looks 1',
default=None,
type=int,
mandatory=False,
doc="number of range looks when forming interferogram")
NUMBER_AZIMUTH_LOOKS1 = Component.Parameter('numberAzimuthLooks1',
public_name='number of azimuth looks 1',
default=None,
type=int,
mandatory=False,
doc="number of azimuth looks when forming interferogram")
NUMBER_RANGE_LOOKS2 = Component.Parameter('numberRangeLooks2',
public_name='number of range looks 2',
default=None,
type=int,
mandatory=False,
doc="number of range looks for further multiple looking")
NUMBER_AZIMUTH_LOOKS2 = Component.Parameter('numberAzimuthLooks2',
public_name='number of azimuth looks 2',
default=None,
type=int,
mandatory=False,
doc="number of azimuth looks for further multiple looking")
NUMBER_RANGE_LOOKS_SIM = Component.Parameter('numberRangeLooksSim',
public_name='number of range looks sim',
default=None,
type=int,
mandatory=False,
doc="number of range looks when simulating radar image")
NUMBER_AZIMUTH_LOOKS_SIM = Component.Parameter('numberAzimuthLooksSim',
public_name='number of azimuth looks sim',
default=None,
type=int,
mandatory=False,
doc="number of azimuth looks when simulating radar image")
NUMBER_RANGE_LOOKS_ION = Component.Parameter('numberRangeLooksIon',
public_name='number of range looks ion',
default=None,
type=int,
mandatory=False,
doc="number of range looks for ionospheric correction")
NUMBER_AZIMUTH_LOOKS_ION = Component.Parameter('numberAzimuthLooksIon',
public_name='number of azimuth looks ion',
default=None,
type=int,
mandatory=False,
doc="number of azimuth looks for ionospheric correction")
NUMBER_RANGE_LOOKS_SD = Component.Parameter('numberRangeLooksSd',
public_name='number of range looks sd',
default=None,
type=int,
mandatory=False,
doc="number of range looks for spectral diversity")
NUMBER_AZIMUTH_LOOKS_SD = Component.Parameter('numberAzimuthLooksSd',
public_name='number of azimuth looks sd',
default=None,
type=int,
mandatory=False,
doc="number of azimuth looks for spectral diversity")
SUBBAND_RADAR_WAVLENGTH = Component.Parameter('subbandRadarWavelength',
public_name='lower and upper radar wavelength for ionosphere correction',
default=None,
type=float,
mandatory=False,
container = list,
doc="lower and upper radar wavelength for ionosphere correction")
RADAR_DEM_AFFINE_TRANSFORM = Component.Parameter('radarDemAffineTransform',
public_name = 'radar dem affine transform parameters',
default = None,
type = float,
mandatory = True,
container = list,
doc = 'radar dem affine transform parameters')
MASTER_SLC = Component.Parameter('masterSlc',
public_name='master slc',
default=None,
type=str,
mandatory=False,
doc='master slc file')
SLAVE_SLC = Component.Parameter('slaveSlc',
public_name='slave slc',
default=None,
type=str,
mandatory=False,
doc='slave slc file')
MASTER_BURST_PREFIX = Component.Parameter('masterBurstPrefix',
public_name='master burst prefix',
default=None,
type=str,
mandatory=False,
doc='master burst prefix')
SLAVE_BURST_PREFIX = Component.Parameter('slaveBurstPrefix',
public_name='slave burst prefix',
default=None,
type=str,
mandatory=False,
doc='slave burst prefix')
MASTER_MAGNITUDE = Component.Parameter('masterMagnitude',
public_name='master magnitude',
default=None,
type=str,
mandatory=False,
doc='master magnitude file')
SLAVE_MAGNITUDE = Component.Parameter('slaveMagnitude',
public_name='slave magnitude',
default=None,
type=str,
mandatory=False,
doc='slave magnitude file')
MASTER_SWATH_OFFSET = Component.Parameter('masterSwathOffset',
public_name='master swath offset',
default=None,
type=str,
mandatory=False,
doc='master swath offset file')
SLAVE_SWATH_OFFSET = Component.Parameter('slaveSwathOffset',
public_name='slave swath offset',
default=None,
type=str,
mandatory=False,
doc='slave swath offset file')
MASTER_FRAME_OFFSET = Component.Parameter('masterFrameOffset',
public_name='master frame offset',
default=None,
type=str,
mandatory=False,
doc='master frame offset file')
SLAVE_FRAME_OFFSET = Component.Parameter('slaveFrameOffset',
public_name='slave frame offset',
default=None,
type=str,
mandatory=False,
doc='slave frame offset file')
MASTER_FRAME_PARAMETER = Component.Parameter('masterFrameParameter',
public_name='master frame parameter',
default=None,
type=str,
mandatory=False,
doc='master frame parameter file')
SLAVE_FRAME_PARAMETER = Component.Parameter('slaveFrameParameter',
public_name='slave frame parameter',
default=None,
type=str,
mandatory=False,
doc='slave frame parameter file')
MASTER_TRACK_PARAMETER = Component.Parameter('masterTrackParameter',
public_name='master track parameter',
default=None,
type=str,
mandatory=False,
doc='master track parameter file')
SLAVE_TRACK_PARAMETER = Component.Parameter('slaveTrackParameter',
public_name='slave track parameter',
default=None,
type=str,
mandatory=False,
doc='slave track parameter file')
DEM = Component.Parameter('dem',
public_name='dem for coregistration',
default=None,
type=str,
mandatory=False,
doc='dem for coregistration file')
DEM_GEO = Component.Parameter('demGeo',
public_name='dem for geocoding',
default=None,
type=str,
mandatory=False,
doc='dem for geocoding file')
WBD = Component.Parameter('wbd',
public_name='water body',
default=None,
type=str,
mandatory=False,
doc='water body file')
WBD_OUT = Component.Parameter('wbdOut',
public_name='output water body',
default=None,
type=str,
mandatory=False,
doc='output water body file')
INTERFEROGRAM = Component.Parameter('interferogram',
public_name='interferogram',
default=None,
type=str,
mandatory=False,
doc='interferogram file')
AMPLITUDE = Component.Parameter('amplitude',
public_name='amplitude',
default=None,
type=str,
mandatory=False,
doc='amplitude file')
DIFFERENTIAL_INTERFEROGRAM = Component.Parameter('differentialInterferogram',
public_name='differential interferogram',
default=None,
type=str,
mandatory=False,
doc='differential interferogram file')
MULTILOOK_DIFFERENTIAL_INTERFEROGRAM = Component.Parameter('multilookDifferentialInterferogram',
public_name='multilook differential interferogram',
default=None,
type=str,
mandatory=False,
doc='multilook differential interferogram file')
MULTILOOK_DIFFERENTIAL_INTERFEROGRAM_ORIGINAL = Component.Parameter('multilookDifferentialInterferogramOriginal',
public_name='original multilook differential interferogram',
default=None,
type=str,
mandatory=False,
doc='original multilook differential interferogram file')
MULTILOOK_AMPLITUDE = Component.Parameter('multilookAmplitude',
public_name='multilook amplitude',
default=None,
type=str,
mandatory=False,
doc='multilook amplitude file')
MULTILOOK_COHERENCE = Component.Parameter('multilookCoherence',
public_name='multilook coherence',
default=None,
type=str,
mandatory=False,
doc='multilook coherence file')
MULTILOOK_PHSIG = Component.Parameter('multilookPhsig',
public_name='multilook phase sigma',
default=None,
type=str,
mandatory=False,
doc='multilook phase sigma file')
FILTERED_INTERFEROGRAM = Component.Parameter('filteredInterferogram',
public_name='filtered interferogram',
default=None,
type=str,
mandatory=False,
doc='filtered interferogram file')
UNWRAPPED_INTERFEROGRAM = Component.Parameter('unwrappedInterferogram',
public_name='unwrapped interferogram',
default=None,
type=str,
mandatory=False,
doc='unwrapped interferogram file')
UNWRAPPED_MASKED_INTERFEROGRAM = Component.Parameter('unwrappedMaskedInterferogram',
public_name='unwrapped masked interferogram',
default=None,
type=str,
mandatory=False,
doc='unwrapped masked interferogram file')
LATITUDE = Component.Parameter('latitude',
public_name='latitude',
default=None,
type=str,
mandatory=False,
doc='latitude file')
LONGITUDE = Component.Parameter('longitude',
public_name='longitude',
default=None,
type=str,
mandatory=False,
doc='longitude file')
HEIGHT = Component.Parameter('height',
public_name='height',
default=None,
type=str,
mandatory=False,
doc='height file')
LOS = Component.Parameter('los',
public_name='los',
default=None,
type=str,
mandatory=False,
doc='los file')
SIM = Component.Parameter('sim',
public_name='sim',
default=None,
type=str,
mandatory=False,
doc='sim file')
MSK = Component.Parameter('msk',
public_name='msk',
default=None,
type=str,
mandatory=False,
doc='msk file')
RANGE_OFFSET = Component.Parameter('rangeOffset',
public_name='range offset',
default=None,
type=str,
mandatory=False,
doc='range offset file')
AZIMUTH_OFFSET = Component.Parameter('azimuthOffset',
public_name='azimuth offset',
default=None,
type=str,
mandatory=False,
doc='azimuth offset file')
MULTILOOK_LOS = Component.Parameter('multilookLos',
public_name='multilook los',
default=None,
type=str,
mandatory=False,
doc='multilook los file')
MULTILOOK_MSK = Component.Parameter('multilookMsk',
public_name='multilook msk',
default=None,
type=str,
mandatory=False,
doc='multilook msk file')
MULTILOOK_WBD_OUT = Component.Parameter('multilookWbdOut',
public_name='multilook wbdOut',
default=None,
type=str,
mandatory=False,
doc='multilook output water body file')
MULTILOOK_LATITUDE = Component.Parameter('multilookLatitude',
public_name='multilook latitude',
default=None,
type=str,
mandatory=False,
doc='multilook latitude file')
MULTILOOK_LONGITUDE = Component.Parameter('multilookLongitude',
public_name='multilook longitude',
default=None,
type=str,
mandatory=False,
doc='multilook longitude file')
MULTILOOK_HEIGHT = Component.Parameter('multilookHeight',
public_name='multilook height',
default=None,
type=str,
mandatory=False,
doc='multilook height file')
MULTILOOK_ION = Component.Parameter('multilookIon',
public_name='multilook ionospheric phase',
default=None,
type=str,
mandatory=False,
doc='multilook ionospheric phase file')
RECT_RANGE_OFFSET = Component.Parameter('rectRangeOffset',
public_name='rectified range offset',
default=None,
type=str,
mandatory=False,
doc='rectified range offset file')
GEO_INTERFEROGRAM = Component.Parameter('geoInterferogram',
public_name='geocoded interferogram',
default=None,
type=str,
mandatory=False,
doc='geocoded interferogram file')
GEO_MASKED_INTERFEROGRAM = Component.Parameter('geoMaskedInterferogram',
public_name='geocoded masked interferogram',
default=None,
type=str,
mandatory=False,
doc='geocoded masked interferogram file')
GEO_COHERENCE = Component.Parameter('geoCoherence',
public_name='geocoded coherence',
default=None,
type=str,
mandatory=False,
container = list,
doc='geocoded coherence file')
GEO_LOS = Component.Parameter('geoLos',
public_name='geocoded los',
default=None,
type=str,
mandatory=False,
doc='geocoded los file')
GEO_ION = Component.Parameter('geoIon',
public_name='geocoded ionospheric phase',
default=None,
type=str,
mandatory=False,
doc='geocoded ionospheric phase file')
###################################################################
#spectral diversity
INTERFEROGRAM_SD = Component.Parameter('interferogramSd',
public_name='spectral diversity interferograms',
default=None,
type=str,
mandatory=False,
container = list,
doc='spectral diversity interferogram files')
MULTILOOK_INTERFEROGRAM_SD = Component.Parameter('multilookInterferogramSd',
public_name='multilook spectral diversity interferograms',
default=None,
type=str,
mandatory=False,
container = list,
doc='multilook spectral diversity interferogram files')
MULTILOOK_COHERENCE_SD = Component.Parameter('multilookCoherenceSd',
public_name='multilook coherence for spectral diversity',
default=None,
type=str,
mandatory=False,
doc='multilook coherence for spectral diversity file')
FILTERED_INTERFEROGRAM_SD = Component.Parameter('filteredInterferogramSd',
public_name='filtered spectral diversity interferograms',
default=None,
type=str,
mandatory=False,
container = list,
doc='filtered spectral diversity interferogram files')
UNWRAPPED_INTERFEROGRAM_SD = Component.Parameter('unwrappedInterferogramSd',
public_name='unwrapped spectral diversity interferograms',
default=None,
type=str,
mandatory=False,
container = list,
doc='unwrapped spectral diversity interferogram files')
UNWRAPPED_MASKED_INTERFEROGRAM_SD = Component.Parameter('unwrappedMaskedInterferogramSd',
public_name='unwrapped masked spectral diversity interferograms',
default=None,
type=str,
mandatory=False,
container = list,
doc='unwrapped masked spectral diversity interferogram files')
AZIMUTH_DEFORMATION_SD = Component.Parameter('azimuthDeformationSd',
public_name='azimuth deformation',
default=None,
type=str,
mandatory=False,
container = list,
doc='azimuth deformation files')
MASKED_AZIMUTH_DEFORMATION_SD = Component.Parameter('maskedAzimuthDeformationSd',
public_name='masked azimuth deformation',
default=None,
type=str,
mandatory=False,
container = list,
doc='masked azimuth deformation files')
MULTILOOK_WBD_OUT_SD = Component.Parameter('multilookWbdOutSd',
public_name='multilook wbdOut for SD',
default=None,
type=str,
mandatory=False,
doc='multilook output water body for SD file')
MULTILOOK_LATITUDE_SD = Component.Parameter('multilookLatitudeSd',
public_name='multilook latitude for SD',
default=None,
type=str,
mandatory=False,
doc='multilook latitude for SD file')
MULTILOOK_LONGITUDE_SD = Component.Parameter('multilookLongitudeSd',
public_name='multilook longitude for SD',
default=None,
type=str,
mandatory=False,
doc='multilook longitude for SD file')
GEO_COHERENCE_SD = Component.Parameter('geoCoherenceSd',
public_name='geocoded coherence for spectral diversity',
default=None,
type=str,
mandatory=False,
container = list,
doc='geocoded coherence for spectral diversity file')
GEO_AZIMUTH_DEFORMATION_SD = Component.Parameter('geoAzimuthDeformationSd',
public_name='geocoded azimuth deformation',
default=None,
type=str,
mandatory=False,
container = list,
doc='geocoded azimuth deformation files')
GEO_MASKED_AZIMUTH_DEFORMATION_SD = Component.Parameter('geoMaskedAzimuthDeformationSd',
public_name='geocoded masked azimuth deformation',
default=None,
type=str,
mandatory=False,
container = list,
doc='geocoded masked azimuth deformation files')
###################################################################
class Alos2burstProc(Component):
"""
This class holds the properties, along with methods (setters and getters)
to modify and return their values.
"""
parameter_list = (MASTER_DATE,
SLAVE_DATE,
MODE_COMBINATION,
MASTER_FRAMES,
SLAVE_FRAMES,
STARTING_SWATH,
ENDING_SWATH,
BURST_UNSYNCHRONIZED_TIME,
BURST_SYNCHRONIZATION,
RANGE_RESIDUAL_OFFSET_CC,
AZIMUTH_RESIDUAL_OFFSET_CC,
RANGE_RESIDUAL_OFFSET_SD,
AZIMUTH_RESIDUAL_OFFSET_SD,
SWATH_RANGE_OFFSET_GEOMETRICAL_MASTER,
SWATH_AZIMUTH_OFFSET_GEOMETRICAL_MASTER,
SWATH_RANGE_OFFSET_MATCHING_MASTER,
SWATH_AZIMUTH_OFFSET_MATCHING_MASTER,
SWATH_RANGE_OFFSET_GEOMETRICAL_SLAVE,
SWATH_AZIMUTH_OFFSET_GEOMETRICAL_SLAVE,
SWATH_RANGE_OFFSET_MATCHING_SLAVE,
SWATH_AZIMUTH_OFFSET_MATCHING_SLAVE,
FRAME_RANGE_OFFSET_GEOMETRICAL_MASTER,
FRAME_AZIMUTH_OFFSET_GEOMETRICAL_MASTER,
FRAME_RANGE_OFFSET_MATCHING_MASTER,
FRAME_AZIMUTH_OFFSET_MATCHING_MASTER,
FRAME_RANGE_OFFSET_GEOMETRICAL_SLAVE,
FRAME_AZIMUTH_OFFSET_GEOMETRICAL_SLAVE,
FRAME_RANGE_OFFSET_MATCHING_SLAVE,
FRAME_AZIMUTH_OFFSET_MATCHING_SLAVE,
NUMBER_RANGE_LOOKS1,
NUMBER_AZIMUTH_LOOKS1,
NUMBER_RANGE_LOOKS2,
NUMBER_AZIMUTH_LOOKS2,
NUMBER_RANGE_LOOKS_SIM,
NUMBER_AZIMUTH_LOOKS_SIM,
NUMBER_RANGE_LOOKS_ION,
NUMBER_AZIMUTH_LOOKS_ION,
NUMBER_RANGE_LOOKS_SD,
NUMBER_AZIMUTH_LOOKS_SD,
SUBBAND_RADAR_WAVLENGTH,
RADAR_DEM_AFFINE_TRANSFORM,
MASTER_SLC,
SLAVE_SLC,
MASTER_BURST_PREFIX,
SLAVE_BURST_PREFIX,
MASTER_MAGNITUDE,
SLAVE_MAGNITUDE,
MASTER_SWATH_OFFSET,
SLAVE_SWATH_OFFSET,
MASTER_FRAME_OFFSET,
SLAVE_FRAME_OFFSET,
MASTER_FRAME_PARAMETER,
SLAVE_FRAME_PARAMETER,
MASTER_TRACK_PARAMETER,
SLAVE_TRACK_PARAMETER,
DEM,
DEM_GEO,
WBD,
WBD_OUT,
INTERFEROGRAM,
AMPLITUDE,
DIFFERENTIAL_INTERFEROGRAM,
MULTILOOK_DIFFERENTIAL_INTERFEROGRAM,
MULTILOOK_DIFFERENTIAL_INTERFEROGRAM_ORIGINAL,
MULTILOOK_AMPLITUDE,
MULTILOOK_COHERENCE,
MULTILOOK_PHSIG,
FILTERED_INTERFEROGRAM,
UNWRAPPED_INTERFEROGRAM,
UNWRAPPED_MASKED_INTERFEROGRAM,
LATITUDE,
LONGITUDE,
HEIGHT,
LOS,
SIM,
MSK,
RANGE_OFFSET,
AZIMUTH_OFFSET,
MULTILOOK_LOS,
MULTILOOK_MSK,
MULTILOOK_WBD_OUT,
MULTILOOK_LATITUDE,
MULTILOOK_LONGITUDE,
MULTILOOK_HEIGHT,
MULTILOOK_ION,
RECT_RANGE_OFFSET,
GEO_INTERFEROGRAM,
GEO_MASKED_INTERFEROGRAM,
GEO_COHERENCE,
GEO_LOS,
GEO_ION,
#spectral diversity
INTERFEROGRAM_SD,
MULTILOOK_INTERFEROGRAM_SD,
MULTILOOK_COHERENCE_SD,
FILTERED_INTERFEROGRAM_SD,
UNWRAPPED_INTERFEROGRAM_SD,
UNWRAPPED_MASKED_INTERFEROGRAM_SD,
AZIMUTH_DEFORMATION_SD,
MASKED_AZIMUTH_DEFORMATION_SD,
MULTILOOK_WBD_OUT_SD,
MULTILOOK_LATITUDE_SD,
MULTILOOK_LONGITUDE_SD,
GEO_COHERENCE_SD,
GEO_AZIMUTH_DEFORMATION_SD,
GEO_MASKED_AZIMUTH_DEFORMATION_SD)
facility_list = ()
family='alos2burstcontext'
def __init__(self, name='', procDoc=None):
#self.updatePrivate()
super().__init__(family=self.__class__.family, name=name)
self.procDoc = procDoc
return None
def setFilename(self, masterDate, slaveDate, nrlks1, nalks1, nrlks2, nalks2):
# if masterDate == None:
# masterDate = self.masterDate
# if slaveDate == None:
# slaveDate = self.slaveDate
# if nrlks1 == None:
# nrlks1 = self.numberRangeLooks1
# if nalks1 == None:
# nalks1 = self.numberAzimuthLooks1
# if nrlks2 == None:
# nrlks2 = self.numberRangeLooks2
# if nalks2 == None:
# nalks2 = self.numberAzimuthLooks2
ms = masterDate + '-' + slaveDate
ml1 = '_{}rlks_{}alks'.format(nrlks1, nalks1)
ml2 = '_{}rlks_{}alks'.format(nrlks1*nrlks2, nalks1*nalks2)
self.masterSlc = masterDate + '.slc'
self.slaveSlc = slaveDate + '.slc'
self.masterBurstPrefix = masterDate
self.slaveBurstPrefix = slaveDate
self.masterMagnitude = masterDate + '.mag'
self.slaveMagnitude = slaveDate + '.mag'
self.masterSwathOffset = 'swath_offset_' + masterDate + '.txt'
self.slaveSwathOffset = 'swath_offset_' + slaveDate + '.txt'
self.masterFrameOffset = 'frame_offset_' + masterDate + '.txt'
self.slaveFrameOffset = 'frame_offset_' + slaveDate + '.txt'
self.masterFrameParameter = masterDate + '.frame.xml'
self.slaveFrameParameter = slaveDate + '.frame.xml'
self.masterTrackParameter = masterDate + '.track.xml'
self.slaveTrackParameter = slaveDate + '.track.xml'
#self.dem =
#self.demGeo =
#self.wbd =
self.interferogram = ms + ml1 + '.int'
self.amplitude = ms + ml1 + '.amp'
self.differentialInterferogram = 'diff_' + ms + ml1 + '.int'
self.multilookDifferentialInterferogram = 'diff_' + ms + ml2 + '.int'
self.multilookDifferentialInterferogramOriginal = 'diff_' + ms + ml2 + '_ori.int'
self.multilookAmplitude = ms + ml2 + '.amp'
self.multilookCoherence = ms + ml2 + '.cor'
self.multilookPhsig = ms + ml2 + '.phsig'
self.filteredInterferogram = 'filt_' + ms + ml2 + '.int'
self.unwrappedInterferogram = 'filt_' + ms + ml2 + '.unw'
self.unwrappedMaskedInterferogram = 'filt_' + ms + ml2 + '_msk.unw'
self.latitude = ms + ml1 + '.lat'
self.longitude = ms + ml1 + '.lon'
self.height = ms + ml1 + '.hgt'
self.los = ms + ml1 + '.los'
self.sim = ms + ml1 + '.sim'
self.msk = ms + ml1 + '.msk'
self.wbdOut = ms + ml1 + '.wbd'
self.rangeOffset = ms + ml1 + '_rg.off'
self.azimuthOffset = ms + ml1 + '_az.off'
self.multilookLos = ms + ml2 + '.los'
self.multilookWbdOut = ms + ml2 + '.wbd'
self.multilookMsk = ms + ml2 + '.msk'
self.multilookLatitude = ms + ml2 + '.lat'
self.multilookLongitude = ms + ml2 + '.lon'
self.multilookHeight = ms + ml2 + '.hgt'
self.multilookIon = ms + ml2 + '.ion'
self.rectRangeOffset = ms + ml1 + '_rg_rect.off'
self.geoInterferogram = 'filt_' + ms + ml2 + '.unw.geo'
self.geoMaskedInterferogram = 'filt_' + ms + ml2 + '_msk.unw.geo'
self.geoCoherence = ms + ml2 + '.cor.geo'
self.geoLos = ms + ml2 + '.los.geo'
self.geoIon = ms + ml2 + '.ion.geo'
def setFilenameSd(self, masterDate, slaveDate, nrlks1, nalks1, nrlks_sd, nalks_sd, nsd=3):
#spectral diversity
# if masterDate == None:
# masterDate = self.masterDate
# if slaveDate == None:
# slaveDate = self.slaveDate
# if nrlks1 == None:
# nrlks1 = self.numberRangeLooks1
# if nalks1 == None:
# nalks1 = self.numberAzimuthLooks1
# if nrlks_sd == None:
# nrlks_sd = self.numberRangeLooksSd
# if nalks_sd == None:
# nalks_sd = self.numberAzimuthLooksSd
ms = masterDate + '-' + slaveDate
ml1 = '_{}rlks_{}alks'.format(nrlks1, nalks1)
ml2sd = '_{}rlks_{}alks'.format(nrlks1*nrlks_sd, nalks1*nalks_sd)
self.interferogramSd = ['sd_{}_'.format(i+1) + ms + ml1 + '.int' for i in range(nsd)]
self.multilookInterferogramSd = ['sd_{}_'.format(i+1) + ms + ml2sd + '.int' for i in range(nsd)]
self.multilookCoherenceSd = ['filt_{}_'.format(i+1) + ms + ml2sd + '.cor' for i in range(nsd)]
self.filteredInterferogramSd = ['filt_{}_'.format(i+1) + ms + ml2sd + '.int' for i in range(nsd)]
self.unwrappedInterferogramSd = ['filt_{}_'.format(i+1) + ms + ml2sd + '.unw' for i in range(nsd)]
self.unwrappedMaskedInterferogramSd = ['filt_{}_'.format(i+1) + ms + ml2sd + '_msk.unw' for i in range(nsd)]
self.azimuthDeformationSd = ['azd_{}_'.format(i+1) + ms + ml2sd + '.unw' for i in range(nsd)]
self.azimuthDeformationSd.append('azd_' + ms + ml2sd + '.unw')
self.maskedAzimuthDeformationSd = ['azd_{}_'.format(i+1) + ms + ml2sd + '_msk.unw' for i in range(nsd)]
self.maskedAzimuthDeformationSd.append('azd_' + ms + ml2sd + '_msk.unw')
self.multilookWbdOutSd = ms + ml2sd + '.wbd'
self.multilookLatitudeSd = ms + ml2sd + '.lat'
self.multilookLongitudeSd = ms + ml2sd + '.lon'
self.geoCoherenceSd = ['filt_{}_'.format(i+1) + ms + ml2sd + '.cor.geo' for i in range(nsd)]
self.geoAzimuthDeformationSd = ['azd_{}_'.format(i+1) + ms + ml2sd + '.unw.geo' for i in range(nsd)]
self.geoAzimuthDeformationSd.append('azd_' + ms + ml2sd + '.unw.geo')
self.geoMaskedAzimuthDeformationSd = ['azd_{}_'.format(i+1) + ms + ml2sd + '_msk.unw.geo' for i in range(nsd)]
self.geoMaskedAzimuthDeformationSd.append('azd_' + ms + ml2sd + '_msk.unw.geo')
def loadProduct(self, xmlname):
'''
Load the product using Product Manager.
'''
from iscesys.Component.ProductManager import ProductManager as PM
pm = PM()
pm.configure()
obj = pm.loadProduct(xmlname)
return obj
def saveProduct(self, obj, xmlname):
'''
Save the product to an XML file using Product Manager.
'''
from iscesys.Component.ProductManager import ProductManager as PM
pm = PM()
pm.configure()
pm.dumpProduct(obj, xmlname)
return None
def loadTrack(self, master=True):
'''
Load the track using Product Manager.
'''
if master:
track = self.loadProduct(self.masterTrackParameter)
else:
track = self.loadProduct(self.slaveTrackParameter)
track.frames = []
for i, frameNumber in enumerate(self.masterFrames):
os.chdir('f{}_{}'.format(i+1, frameNumber))
if master:
track.frames.append(self.loadProduct(self.masterFrameParameter))
else:
track.frames.append(self.loadProduct(self.slaveFrameParameter))
os.chdir('../')
return track
def saveTrack(self, track, master=True):
'''
Save the track to XML files using Product Manager.
'''
if master:
self.saveProduct(track, self.masterTrackParameter)
else:
self.saveProduct(track, self.slaveTrackParameter)
for i, frameNumber in enumerate(self.masterFrames):
os.chdir('f{}_{}'.format(i+1, frameNumber))
if master:
self.saveProduct(track.frames[i], self.masterFrameParameter)
else:
self.saveProduct(track.frames[i], self.slaveFrameParameter)
os.chdir('../')
return None
def hasGPU(self):
'''
Determine if GPU modules are available.
'''
flag = False
try:
from zerodop.GPUtopozero.GPUtopozero import PyTopozero
from zerodop.GPUgeo2rdr.GPUgeo2rdr import PyGeo2rdr
flag = True
except:
pass
return flag
```
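A brief sketch of how the filename bookkeeping in `Alos2burstProc` is exercised; the acquisition dates and numbers of looks below are placeholder values, not taken from a real pair.
```python
proc = Alos2burstProc()
proc.configure()

# Placeholder acquisition dates (YYMMDD) and numbers of looks.
proc.setFilename('150225', '150408', nrlks1=2, nalks1=8, nrlks2=4, nalks2=4)
proc.setFilenameSd('150225', '150408', nrlks1=2, nalks1=8, nrlks_sd=4, nalks_sd=4, nsd=3)

print(proc.differentialInterferogram)  # diff_150225-150408_2rlks_8alks.int
print(proc.multilookCoherence)         # 150225-150408_8rlks_32alks.cor
print(proc.azimuthDeformationSd[-1])   # azd_150225-150408_8rlks_32alks.unw
```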
#### File: isceobj/Alos2burstProc/runCoregGeom.py
```python
import os
import logging
import isceobj
from isceobj.Alos2Proc.runRdr2Geo import topoCPU
from isceobj.Alos2Proc.runRdr2Geo import topoGPU
from isceobj.Alos2Proc.runGeo2Rdr import geo2RdrCPU
from isceobj.Alos2Proc.runGeo2Rdr import geo2RdrGPU
from isceobj.Alos2Proc.Alos2ProcPublic import waterBodyRadar
from isceobj.Alos2Proc.Alos2ProcPublic import resampleBursts
from isceobj.Alos2Proc.Alos2ProcPublic import mosaicBurstAmplitude
from isceobj.Alos2Proc.Alos2ProcPublic import mosaicBurstInterferogram
logger = logging.getLogger('isce.alos2burstinsar.runCoregGeom')
def runCoregGeom(self):
'''compute geometric offset
'''
catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name)
self.updateParamemetersFromUser()
masterTrack = self._insar.loadTrack(master=True)
slaveTrack = self._insar.loadTrack(master=False)
demFile = os.path.abspath(self._insar.dem)
wbdFile = os.path.abspath(self._insar.wbd)
###############################################################################
for i, frameNumber in enumerate(self._insar.masterFrames):
frameDir = 'f{}_{}'.format(i+1, frameNumber)
os.chdir(frameDir)
for j, swathNumber in enumerate(range(self._insar.startingSwath, self._insar.endingSwath + 1)):
swathDir = 's{}'.format(swathNumber)
os.chdir(swathDir)
print('processing frame {}, swath {}'.format(frameNumber, swathNumber))
masterSwath = masterTrack.frames[i].swaths[j]
slaveSwath = slaveTrack.frames[i].swaths[j]
##################################################
# compute geometric offsets
##################################################
#set up track parameters just for computing offsets
#ALL track parameters are listed here
#master
#masterTrack.passDirection =
#masterTrack.pointingDirection =
#masterTrack.operationMode =
#masterTrack.radarWavelength =
masterTrack.numberOfSamples = masterSwath.numberOfSamples
masterTrack.numberOfLines = masterSwath.numberOfLines
masterTrack.startingRange = masterSwath.startingRange
#masterTrack.rangeSamplingRate =
masterTrack.rangePixelSize = masterSwath.rangePixelSize
masterTrack.sensingStart = masterSwath.sensingStart
#masterTrack.prf =
#masterTrack.azimuthPixelSize =
masterTrack.azimuthLineInterval = masterSwath.azimuthLineInterval
#masterTrack.dopplerVsPixel =
#masterTrack.frames =
#masterTrack.orbit =
#slave
slaveTrack.numberOfSamples = slaveSwath.numberOfSamples
slaveTrack.numberOfLines = slaveSwath.numberOfLines
slaveTrack.startingRange = slaveSwath.startingRange
slaveTrack.rangePixelSize = slaveSwath.rangePixelSize
slaveTrack.sensingStart = slaveSwath.sensingStart
slaveTrack.azimuthLineInterval = slaveSwath.azimuthLineInterval
if self.useGPU and self._insar.hasGPU():
topoGPU(masterTrack, 1, 1, demFile,
self._insar.latitude, self._insar.longitude, self._insar.height, self._insar.los)
geo2RdrGPU(slaveTrack, 1, 1,
self._insar.latitude, self._insar.longitude, self._insar.height, self._insar.rangeOffset, self._insar.azimuthOffset)
else:
topoCPU(masterTrack, 1, 1, demFile,
self._insar.latitude, self._insar.longitude, self._insar.height, self._insar.los)
geo2RdrCPU(slaveTrack, 1, 1,
self._insar.latitude, self._insar.longitude, self._insar.height, self._insar.rangeOffset, self._insar.azimuthOffset)
waterBodyRadar(self._insar.latitude, self._insar.longitude, wbdFile, self._insar.wbdOut)
#clear up, leaving only range/azimuth offsets
os.remove(self._insar.latitude)
os.remove(self._insar.latitude+'.vrt')
os.remove(self._insar.latitude+'.xml')
os.remove(self._insar.longitude)
os.remove(self._insar.longitude+'.vrt')
os.remove(self._insar.longitude+'.xml')
os.remove(self._insar.height)
os.remove(self._insar.height+'.vrt')
os.remove(self._insar.height+'.xml')
os.remove(self._insar.los)
os.remove(self._insar.los+'.vrt')
os.remove(self._insar.los+'.xml')
##################################################
# resample bursts
##################################################
slaveBurstResampledDir = self._insar.slaveBurstPrefix + '_1_coreg_geom'
#interferogramDir = self._insar.masterBurstPrefix + '-' + self._insar.slaveBurstPrefix + '_coreg_geom'
interferogramDir = 'burst_interf_1_coreg_geom'
interferogramPrefix = self._insar.masterBurstPrefix + '-' + self._insar.slaveBurstPrefix
resampleBursts(masterSwath, slaveSwath,
self._insar.masterBurstPrefix, self._insar.slaveBurstPrefix, slaveBurstResampledDir, interferogramDir,
self._insar.masterBurstPrefix, self._insar.slaveBurstPrefix, self._insar.slaveBurstPrefix, interferogramPrefix,
self._insar.rangeOffset, self._insar.azimuthOffset, rangeOffsetResidual=0, azimuthOffsetResidual=0)
##################################################
# mosaic burst amplitudes and interferograms
##################################################
os.chdir(slaveBurstResampledDir)
mosaicBurstAmplitude(masterSwath, self._insar.slaveBurstPrefix, self._insar.slaveMagnitude, numberOfLooksThreshold=4)
os.chdir('../')
#the interferogram is not good enough, do not mosaic
mosaic=False
if mosaic:
os.chdir(interferogramDir)
mosaicBurstInterferogram(masterSwath, interferogramPrefix, self._insar.interferogram, numberOfLooksThreshold=4)
os.chdir('../')
os.chdir('../')
os.chdir('../')
###############################################################################
catalog.printToLog(logger, "runCoregGeom")
self._insar.procDoc.addAllFromCatalog(catalog)
```
#### File: isceobj/Alos2burstProc/runGeocodeSd.py
```python
import os
import logging
import numpy as np
import isceobj
from isceobj.Alos2Proc.runGeocode import geocode
from isceobj.Alos2Proc.Alos2ProcPublic import getBboxGeo
logger = logging.getLogger('isce.alos2insar.runGeocodeSd')
def runGeocodeSd(self):
'''geocode final products
'''
catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name)
self.updateParamemetersFromUser()
masterTrack = self._insar.loadTrack(master=True)
#slaveTrack = self._insar.loadTrack(master=False)
demFile = os.path.abspath(self._insar.demGeo)
sdDir = 'sd'
if not os.path.exists(sdDir):
os.makedirs(sdDir)
os.chdir(sdDir)
if self.geocodeListSd == None:
geocodeList = self._insar.multilookCoherenceSd + self._insar.azimuthDeformationSd + self._insar.maskedAzimuthDeformationSd
else:
geocodeList = self.geocodeListSd
if self.bbox == None:
bbox = getBboxGeo(masterTrack)
else:
bbox = self.bbox
catalog.addItem('geocode bounding box', bbox, 'runGeocodeSd')
numberRangeLooks = self._insar.numberRangeLooks1 * self._insar.numberRangeLooksSd
numberAzimuthLooks = self._insar.numberAzimuthLooks1 * self._insar.numberAzimuthLooksSd
for inputFile in geocodeList:
if self.geocodeInterpMethodSd == None:
img = isceobj.createImage()
img.load(inputFile + '.xml')
if img.dataType.upper() == 'CFLOAT':
interpMethod = 'sinc'
else:
interpMethod = 'bilinear'
else:
interpMethod = self.geocodeInterpMethodSd.lower()
geocode(masterTrack, demFile, inputFile, bbox, numberRangeLooks, numberAzimuthLooks, interpMethod, 0, 0)
os.chdir('../')
catalog.printToLog(logger, "runGeocodeSd")
self._insar.procDoc.addAllFromCatalog(catalog)
```
#### File: isceobj/Alos2Proc/runSlcMatch.py
```python
import os
import logging
import numpy as np
import isceobj
from isceobj.Alos2Proc.runRdr2Geo import topoCPU
from isceobj.Alos2Proc.runRdr2Geo import topoGPU
from isceobj.Alos2Proc.runGeo2Rdr import geo2RdrCPU
from isceobj.Alos2Proc.runGeo2Rdr import geo2RdrGPU
from contrib.alos2proc.alos2proc import resamp
from isceobj.Alos2Proc.Alos2ProcPublic import create_xml
from isceobj.Alos2Proc.Alos2ProcPublic import renameFile
from isceobj.Alos2Proc.Alos2ProcPublic import waterBodyRadar
from mroipac.ampcor.Ampcor import Ampcor
from isceobj.Alos2Proc.Alos2ProcPublic import meanOffset
from isceobj.Alos2Proc.Alos2ProcPublic import cullOffsetsRoipac
logger = logging.getLogger('isce.alos2insar.runSlcMatch')
def runSlcMatch(self):
'''match a pair of SLCs
'''
if not self.doDenseOffset:
return
if not ((self._insar.modeCombination == 0) or (self._insar.modeCombination == 1)):
return
catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name)
self.updateParamemetersFromUser()
demFile = os.path.abspath(self._insar.dem)
wbdFile = os.path.abspath(self._insar.wbd)
denseOffsetDir = 'dense_offset'
if not os.path.exists(denseOffsetDir):
os.makedirs(denseOffsetDir)
os.chdir(denseOffsetDir)
masterTrack = self._insar.loadProduct(self._insar.masterTrackParameter)
slaveTrack = self._insar.loadProduct(self._insar.slaveTrackParameter)
#########################################################################################
##################################################
# compute geometric offsets
##################################################
if self.useGPU and self._insar.hasGPU():
topoGPU(masterTrack, 1, 1, demFile,
'lat.rdr', 'lon.rdr', 'hgt.rdr', 'los.rdr')
geo2RdrGPU(slaveTrack, 1, 1,
'lat.rdr', 'lon.rdr', 'hgt.rdr', 'rg.off', 'az.off')
else:
topoCPU(masterTrack, 1, 1, demFile,
'lat.rdr', 'lon.rdr', 'hgt.rdr', 'los.rdr')
geo2RdrCPU(slaveTrack, 1, 1,
'lat.rdr', 'lon.rdr', 'hgt.rdr', 'rg.off', 'az.off')
##################################################
# resample SLC
##################################################
#SlaveSlcResampled = os.path.splitext(self._insar.slaveSlc)[0]+'_resamp'+os.path.splitext(self._insar.slaveSlc)[1]
SlaveSlcResampled = self._insar.slaveSlcCoregistered
rangeOffsets2Frac = 0.0
azimuthOffsets2Frac = 0.0
resamp(self._insar.slaveSlc,
SlaveSlcResampled,
'rg.off',
'az.off',
masterTrack.numberOfSamples, masterTrack.numberOfLines,
slaveTrack.prf,
slaveTrack.dopplerVsPixel,
[rangeOffsets2Frac, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[azimuthOffsets2Frac, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
create_xml(SlaveSlcResampled, masterTrack.numberOfSamples, masterTrack.numberOfLines, 'slc')
if self.estimateResidualOffset:
numberOfOffsets = 800
rangeStep = 50
length = masterTrack.numberOfLines
width = masterTrack.numberOfSamples
waterBodyRadar('lat.rdr', 'lon.rdr', wbdFile, 'wbd.rdr')
wbd=np.memmap('wbd.rdr', dtype=np.int8, mode='r', shape=(length, width))
azimuthStep = int(length/width*rangeStep+0.5)
landRatio = np.sum(wbd[0:length:azimuthStep,0:width:rangeStep]!=-1)/(int(length/azimuthStep)*int(width/rangeStep))
del wbd
if (landRatio <= 0.00125):
print('\n\nWARNING: land area too small for estimating residual slc offsets')
print('do not estimate residual offsets\n\n')
catalog.addItem('warning message', 'land area too small for estimating residual slc offsets', 'runSlcMatch')
else:
numberOfOffsets /= landRatio
#we use equal number of offsets in range and azimuth here
numberOfOffsetsRange = int(np.sqrt(numberOfOffsets)+0.5)
numberOfOffsetsAzimuth = int(np.sqrt(numberOfOffsets)+0.5)
if numberOfOffsetsRange > int(width/2):
numberOfOffsetsRange = int(width/2)
if numberOfOffsetsAzimuth > int(length/2):
numberOfOffsetsAzimuth = int(length/2)
if numberOfOffsetsRange < 10:
numberOfOffsetsRange = 10
if numberOfOffsetsAzimuth < 10:
numberOfOffsetsAzimuth = 10
##########################################
#2. match using ampcor
##########################################
ampcor = Ampcor(name='insarapp_slcs_ampcor')
ampcor.configure()
mSLC = isceobj.createSlcImage()
mSLC.load(self._insar.masterSlc+'.xml')
mSLC.setAccessMode('read')
mSLC.createImage()
sSLC = isceobj.createSlcImage()
sSLC.load(SlaveSlcResampled+'.xml')
sSLC.setAccessMode('read')
sSLC.createImage()
ampcor.setImageDataType1('complex')
ampcor.setImageDataType2('complex')
ampcor.setMasterSlcImage(mSLC)
ampcor.setSlaveSlcImage(sSLC)
#MATCH REGION
#compute an offset at image center to use
rgoff = 0.0
azoff = 0.0
            #it seems that we cannot use 0, haven't looked into the problem
if rgoff == 0:
rgoff = 1
if azoff == 0:
azoff = 1
firstSample = 1
if rgoff < 0:
firstSample = int(35 - rgoff)
firstLine = 1
if azoff < 0:
firstLine = int(35 - azoff)
ampcor.setAcrossGrossOffset(rgoff)
ampcor.setDownGrossOffset(azoff)
ampcor.setFirstSampleAcross(firstSample)
ampcor.setLastSampleAcross(mSLC.width)
ampcor.setNumberLocationAcross(numberOfOffsetsRange)
ampcor.setFirstSampleDown(firstLine)
ampcor.setLastSampleDown(mSLC.length)
ampcor.setNumberLocationDown(numberOfOffsetsAzimuth)
#MATCH PARAMETERS
#full-aperture mode
if (self._insar.modeCombination == 21) or \
(self._insar.modeCombination == 22) or \
(self._insar.modeCombination == 31) or \
(self._insar.modeCombination == 32):
ampcor.setWindowSizeWidth(64)
ampcor.setWindowSizeHeight(512)
#note this is the half width/length of search area, number of resulting correlation samples: 32*2+1
ampcor.setSearchWindowSizeWidth(32)
ampcor.setSearchWindowSizeHeight(32)
#triggering full-aperture mode matching
ampcor.setWinsizeFilt(8)
ampcor.setOversamplingFactorFilt(64)
#regular mode
else:
ampcor.setWindowSizeWidth(64)
ampcor.setWindowSizeHeight(64)
ampcor.setSearchWindowSizeWidth(16)
ampcor.setSearchWindowSizeHeight(16)
#REST OF THE STUFF
ampcor.setAcrossLooks(1)
ampcor.setDownLooks(1)
ampcor.setOversamplingFactor(64)
ampcor.setZoomWindowSize(16)
#1. The following not set
#Matching Scale for Sample/Line Directions (-) = 1. 1.
#should add the following in Ampcor.py?
#if not set, in this case, Ampcor.py's value is also 1. 1.
#ampcor.setScaleFactorX(1.)
#ampcor.setScaleFactorY(1.)
#MATCH THRESHOLDS AND DEBUG DATA
#2. The following not set
#in roi_pac the value is set to 0 1
#in isce the value is set to 0.001 1000.0
#SNR and Covariance Thresholds (-) = {s1} {s2}
#should add the following in Ampcor?
#THIS SHOULD BE THE ONLY THING THAT IS DIFFERENT FROM THAT OF ROI_PAC
#ampcor.setThresholdSNR(0)
#ampcor.setThresholdCov(1)
ampcor.setDebugFlag(False)
ampcor.setDisplayFlag(False)
#in summary, only two things not set which are indicated by 'The following not set' above.
#run ampcor
ampcor.ampcor()
offsets = ampcor.getOffsetField()
mSLC.finalizeImage()
sSLC.finalizeImage()
#3. cull offsets
refinedOffsets = cullOffsetsRoipac(offsets, numThreshold=50)
if refinedOffsets == None:
print('\n\nWARNING: too few offsets left for slc residual offset estimation')
print('do not estimate residual offsets\n\n')
catalog.addItem('warning message', 'too few offsets left for slc residual offset estimation', 'runSlcMatch')
else:
rangeOffset, azimuthOffset = meanOffset(refinedOffsets)
os.remove(SlaveSlcResampled)
os.remove(SlaveSlcResampled+'.vrt')
os.remove(SlaveSlcResampled+'.xml')
rangeOffsets2Frac = rangeOffset
azimuthOffsets2Frac = azimuthOffset
resamp(self._insar.slaveSlc,
SlaveSlcResampled,
'rg.off',
'az.off',
masterTrack.numberOfSamples, masterTrack.numberOfLines,
slaveTrack.prf,
slaveTrack.dopplerVsPixel,
[rangeOffsets2Frac, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[azimuthOffsets2Frac, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
create_xml(SlaveSlcResampled, masterTrack.numberOfSamples, masterTrack.numberOfLines, 'slc')
catalog.addItem('number of offsets range', numberOfOffsetsRange, 'runSlcMatch')
catalog.addItem('number of offsets azimuth', numberOfOffsetsAzimuth, 'runSlcMatch')
catalog.addItem('range residual offset after geometric coregistration', rangeOffset, 'runSlcMatch')
catalog.addItem('azimuth residual offset after geometric coregistration', azimuthOffset, 'runSlcMatch')
if self.deleteGeometryFiles:
os.remove('lat.rdr')
os.remove('lat.rdr.vrt')
os.remove('lat.rdr.xml')
os.remove('lon.rdr')
os.remove('lon.rdr.vrt')
os.remove('lon.rdr.xml')
os.remove('hgt.rdr')
os.remove('hgt.rdr.vrt')
os.remove('hgt.rdr.xml')
os.remove('los.rdr')
os.remove('los.rdr.vrt')
os.remove('los.rdr.xml')
# if os.path.isfile('wbd.rdr'):
# os.remove('wbd.rdr')
# os.remove('wbd.rdr.vrt')
# os.remove('wbd.rdr.xml')
#########################################################################################
os.chdir('../')
catalog.printToLog(logger, "runSlcMatch")
self._insar.procDoc.addAllFromCatalog(catalog)
``` |
{
"source": "4rgc/PiazzaBot",
"score": 3
} |
#### File: PiazzaBot/piazza_object/piazza_object.py
```python
class PiazzaObject:
def __init__(self, id, subject, content, date, link):
self.id = id
self.subject = subject
self.content = content
self.date = date
self.link = link
def to_string(self):
return f'Subject: {self.subject}\nContent: {self.content}\nDate: {self.date}\nLink: {self.link}'
``` |
{
"source": "4rk0z/AdventOfCode",
"score": 4
} |
#### File: solutions/day03/puzzle3.py
```python
def walk(x, y):
row = col = 0
moves = 0
while row < pattern_row_nums - 1:
col = (col + x) % pattern_col_nums
row += y
if pattern[row][col] == '#':
moves += 1
return moves
pattern = []
pattern_col_nums = pattern_row_nums = 0
with open("input", 'r', encoding="utf8") as input:
for lines in input:
lines = lines[:-1]
pattern += lines.split()
pattern_col_nums = len(pattern[0])
pattern_row_nums = len(pattern)
# part 1
print("part1:", walk(3, 1))
# part 2
print("part2:", walk(1, 1) * walk(3, 1) * walk(5, 1) * walk(7, 1) * walk(1, 2))
``` |
{
"source": "4roring/KK1StockSupporter",
"score": 2
} |
#### File: 4roring/KK1StockSupporter/KK1StockSupportBot.py
```python
import telegram
from telegram.ext import Updater, MessageHandler, CommandHandler, Filters
from multiprocessing import Value
def get_message(update: telegram.Update, context, event_handler: Value):
update.message.reply_text(update.message.text)
# Telegram bot. The plan is to create and use two objects: one acting as the sender on the stock-API side and a separate one acting as the updater.
class KK1StockSupportBot:
def __init__(self, token: str):
self.token = token
self.updater = Updater(token, use_context=True)
message_handler = MessageHandler(Filters.text & (~Filters.command), get_message)
self.updater.dispatcher.add_handler(message_handler)
def __del__(self):
self.updater.bot.sendMessage(456096060, 'The bot has shut down. Please run /init on the next start.')
def add_command_handler(self, command: str, call_back):
# Set up so that the bot responds to plain text but not to commands
command_handler = CommandHandler(command, call_back)
self.updater.dispatcher.add_handler(command_handler)
def SendMessage(self, message: str):
self.updater.bot.sendMessage(456096060, message)
def run(self):
print('Run KK1 Stock Support Bot')
self.updater.start_polling(timeout=3, clean=True)
self.updater.idle()
``` |
{
"source": "4rqm/pycord",
"score": 2
} |
#### File: pycord/pycord/client.py
```python
import inspect
import time
import traceback
from collections import deque
import asks
import trio
import sys
import multio
from .api import HttpClient, ShardConnection, Webhook
from .models import Channel, Guild, User
from .utils import Collection
from .utils import Emitter
from .utils.commands import Command, CommandCollection, Context
class Client(Emitter):
"""
Represents a client that connects to Discord.
This class is the core of the library, with all functionality
revolving around it.
Parameters
----------
shard_count : Optional[int]
The amount of shards to use, this will be automatically set
using the bot ws gateway endpoint if not provided.
message_cache_max : Optional[int]
The maximum number of messages to store in the internal deque
cache. Defaults to 2500 if not provided.
prefixes : optional[str, list]
The prefixes to use for commands. Can either be a list or a
single prefix. Defaults to 'py.' if not provided.
Attributes
----------
token : str
The bot token provided by the login method.
is_bot : bool
Specifies whether or not the client is a bot.
shards : list
Stores the client's ShardConnections (ws) indexed by id
users : collection
Stores all user objects that the client can see
guilds : collection
Stores all the guild objects the client is a part of currently.
channels : collection
Stores all the channel objects that the client can see.
messages : collection
A deque cache that stores the last x amount of messages
specified by the ``message_cache_max`` parameter.
commands : collection
A special collection that stores all registered commands
prefixes : list
Contains a list of prefixes that a command may be used with
session
An asks.Session that is for public use, this is different from
the internal session the HttpClient uses.
"""
def __init__(self, shard_count=-1, prefixes='py.', message_cache_max=2500, **kwargs):
super().__init__()
self.async_init()
self.token = ''
self.is_bot = True
self._boot_up_time = None
self.running = trio.Event()
self.api = HttpClient(self)
self.session = asks.Session() # public session
self.shards = [] if shard_count < 1 else list(range(shard_count))
self.users = Collection(User)
self.guilds = Collection(Guild)
self.channels = Collection(Channel)
self.messages = deque(maxlen=message_cache_max)
self.commands = CommandCollection(self)
self.webhooks = Collection(Webhook, indexor='name')
self.prefixes = prefixes if isinstance(prefixes, list) else [prefixes]
self._nonces = dict()
def __del__(self):
if self.is_bot:
self.close()
def async_init(self):
multio.init('trio')
asks.init('trio')
def wait_for_nonce(self, nonce):
event = multio.Event()
self._nonces[str(nonce)] = event
return event.wait()
async def _close(self):
for shard in self.shards:
await shard.close()
self.running.set()
def close(self):
trio.run(self._close)
async def start(self, token, bot):
self.token = self.api.token = token
self.is_bot = bot
# get gateway info
endpoint = '/gateway'
if self.is_bot:
endpoint += '/bot'
info = await self.api.get(endpoint)
url = info.get('url')
# get amount of shards
shard_count = info.get('shards', 1)
if len(self.shards) < 1:
self.shards = list(range(shard_count))
else:
shard_count = len(self.shards)
# spawn shard connections
async with trio.open_nursery() as nursery:
for shard_id in range(shard_count):
shard = ShardConnection(self, shard_id, shard_count)
self.shards[shard_id] = shard
nursery.start_soon(shard.start, url)
# wait for client to stop running
await self.running.wait()
def login(self, token, bot=True):
self._boot_up_time = time.time()
try:
trio.run(self.start, token, bot)
except KeyboardInterrupt:
pass
finally:
self.close()
async def on_error(self, error):
"""Default error handler for events"""
print('Error caught for the on_error event:', file=sys.stderr)
traceback.print_exception(type(error), error, error.__traceback__)
async def on_command_error(self, error):
traceback.print_exception(type(error), error, error.__traceback__)
async def on_message(self, message):
await self.process_commands(message)
async def process_commands(self, msg):
context = Context(self, msg)
await context.invoke()
def cmd(self, name=None, *, callback=None, aliases=[]):
if isinstance(aliases, str):
aliases = [aliases]
if inspect.iscoroutinefunction(callback):
name = name or callback.__name__
cmd = Command(name=name, callback=callback, aliases=aliases)
self.commands.add(cmd)
else:
def wrapper(coro):
if not inspect.iscoroutinefunction(coro):
raise RuntimeWarning('Callback is not a coroutine!')
cmd = Command(name=name or coro.__name__, callback=coro, aliases=aliases)
self.commands.add(cmd)
return cmd
return wrapper
def add_webhook(self, name, url, **fields):
'''Register a webhook to the client.
Example:
client.add_webhook('test', url)
await client.webhooks.get('test').send('hello', embeds=em)
'''
hook = Webhook(url=url, name=name, **fields)
self.webhooks.add(hook)
```
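For orientation, a minimal usage sketch of the `Client` API defined above; the top-level import path, the bot token and the `ctx.send()` call are assumptions rather than details confirmed by this snippet:

```python
from pycord import Client  # assumed import path

client = Client(prefixes='py.')

@client.cmd(name='ping')
async def ping(ctx):
    await ctx.send('pong')  # assumes the Context object exposes a send() helper

client.add_webhook('test', 'https://discordapp.com/api/webhooks/...')  # placeholder URL
client.login('YOUR_BOT_TOKEN')  # placeholder token; blocks until the client stops
```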
#### File: pycord/models/channel.py
```python
from ..models.core import Snowflake, Serializable
from abc import ABC
from .embed import Embed
TEXTCHANNEL = 0
DMCHANNEL = 1
VOICECHANNEL = 2
GROUPDMCHANNEL = 3
CATEGORYCHANNEL = 4
GUILD_CHANNELS = (TEXTCHANNEL, VOICECHANNEL, CATEGORYCHANNEL)
DM_CHANNELS = (GROUPDMCHANNEL, DMCHANNEL)
class Sendable:
""" Base class for objects that can send messages """
async def send(self, content=None, *, embed=None, tts=False):
if isinstance(embed, Embed):
embed = embed.to_dict()
return await self.client.api.send_message(self, content=content, embed=embed, tts=tts)
async def trigger_typing(self):
return await self.client.api.trigger_typing(self)
class Channel(Snowflake):
def from_dict(self, data):
for attr in data:
if 'id' in attr:
setattr(self, attr, int(data[attr]))
else:
setattr(self, attr, data[attr])
class TextChannel(Sendable, Channel):
__slots__ = ("topic", "parent",'name', 'position',
'guild', 'type',
'permission_overwrites', 'id')
def __init__(self, guild, data):
self.guild = guild
self.client = self.guild.client
self.parent = self.client.channels.get(int(data.get("parent_id", 0)))
self.from_dict(data)
def __str__(self):
return self.name
def __repr__(self):
return "<TextChannel name='{0.name}' id={0.id}>".format(self)
def trigger_typing(self):
return self.client.http.send_typing(self)
class VoiceChannel(Channel):
__slots__ = ('bitrate', 'user_limit', 'parent')
def __init__(self, guild, data):
self.guild = guild
self.client = self.guild.client
self.parent = self.client.channels.get(int(data.get("parent_id", 0) or 0))
self.from_dict(data)
def __repr__(self):
return "<VoiceChannel name='{0.name}' id={0.id} bitrate={0.bitrate} limit={0.user_limit}>".format(self)
class CategoryChannel(Channel):
__slots__ = ('name', 'position', 'guild')
def __init__(self, guild, data):
self.guild = guild
self.client = self.guild.client
self.parent = self.client.channels.get(int(data.get("parent_id", 0) or 0))
self.from_dict(data)
def __str__(self):
return self.name
class DMGroupChannel(Channel, Sendable):
__slots__ = ('recipients', 'icon', 'owner')
def __init__(self, client, data):
self.client = client
self.owner = self.client.users.get(int(data.get("owner_id", 0)))
self.name = None
self.from_dict(data)
self.recipients = [self.client.users.get(int(user["id"])) for user in data.get("recipients", ())]
def trigger_typing(self):
return self.client.http.send_typing(self)
class DMChannel(Channel, Sendable):
def __init__(self, client, data):
self.client = client
self.parent = self.client.channels.get(int(data.get("parent_id", 0) or 0))
self.from_dict(data)
def trigger_typing(self):
return self.client.http.send_typing(self)
```
#### File: pycord/models/emoji.py
```python
from .core import Snowflake, Serializable
from ..models.role import Role
from ..utils import Collection
class Emoji(Snowflake, Serializable):
__slots__ = ('guild', 'id', 'name', 'roles', 'client')
def __init__(self, guild, data=None):
if data is None:
data = {}
self.guild = guild
self.client = guild.client
self.id = int(data.get('id', 0))
self.name = data.get('name', '')
self.require_colons = bool(data.get('require_colons', False))
self.managed = bool(data.get('managed', False))
self.roles = Collection(Role)
self.from_dict(data)
def from_dict(self, data):
self.id = int(data.get('id', 0))
self.name = data.get('name')
for role in data.get('roles', []):
if role:
if self.guild.roles.has(role):
guild_role = self.guild.roles.get(role)
self.roles.add(guild_role)
def delete(self, reason=None):
return self.client.api.delete_custom_emoji(self.guild, self, reason)
```
#### File: pycord/models/guild.py
```python
from ..models.channel import (Channel, TextChannel, VoiceChannel,
CategoryChannel, TEXTCHANNEL, VOICECHANNEL)
from ..models.core import Snowflake, Serializable
from ..models.emoji import Emoji
from ..models.role import Role
from ..models.user import Member, User
from ..utils import Collection
class Game:
def __init__(self, game):
self.name = game.get('name')
self.type = game.get('type')
# TODO: Enum for type
def __str__(self):
types = {
0: 'Playing',
1: 'Streaming',
2: 'Listening to',
3: 'Watching',
}
return f'{types.get(self.type)} {self.name}'
class Guild(Snowflake, Serializable):
__slots__ = (
'members', 'channels', 'emojis', 'roles',
'afk_timeout', 'afk_channel', 'icon',
'name', 'unavailable', 'name', 'region',
'default_role', 'member_count', 'large',
'owner_id', 'mfa_level', 'features', 'client',
'verification_level', 'explicit_content_filter', 'splash',
)
def __init__(self, client, data=None):
if data is None:
data = {}
self.client = client
self.roles = Collection(Role)
self.emojis = Collection(Emoji)
self.members = Collection(Member)
self.channels = Collection(Channel)
self.id = int(data.get("id"), 0)
self.name = None
self.icon = None
self.region = None
self.splash = None
self.mfa_level = None
self.features = None
self.verification_level = None
self.explicit_content_filter = None
if not data.get("unavailable", False):
self.from_dict(data)
else:
self.unavailable = True
def __str__(self):
return self.name
def from_dict(self, data):
self.id = int(data.get('id', 0))
self.name = data.get('name')
self.icon = data.get('icon')
self.region = data.get('region')
self.splash = data.get('splash')
self.mfa_level = data.get('mfa_level')
self.features = data.get('features', [])
self.unavailable = data.get('unavailable', True)
self.verification_level = data.get('verification_level')
self.explicit_content_filter = data.get('explicit_content_filter', False)
self.owner_id = int(data.get('owner_id'))
for channel_data in data.get('channels', []):
chan_type = channel_data.get('type', 0)
if chan_type == TEXTCHANNEL:
channel = TextChannel(self, channel_data)
elif chan_type == VOICECHANNEL:
channel = VoiceChannel(self, channel_data)
else:
channel = CategoryChannel(self, channel_data)
self.client.channels.add(channel)
self.channels.add(channel)
for role in data.get('roles', []):
self.roles.add(Role(self, role))
for emoji in data.get('emojis', []):
self.emojis.add(Emoji(self, emoji))
for member in data.get('members', []):
user = member.get('user')
if user:
user_id = int(user['id'])
if not self.client.users.has(user_id):
user = User(self.client, user)
self.client.users.add(user)
else:
user = self.client.users.get(user_id)
self.members.add(Member(self.client, self, user, member))
for presence in data.get('presences', []):
member = self.members.get(int(presence['user']['id']))
if member is None:
continue
game = presence.get('game')
member.game = Game(game) if game else None
member.status = presence.get('status')
# print(member.status)
if not member.bot:
member.user.status = member.status
member.user.game = member.game
self.owner = self.members.get(self.owner_id)
@property
def icon_url(self):
return self.icon_url_as()
def icon_url_as(self, format='png', size=1024):
return 'https://cdn.discordapp.com/icons/{0.id}/{0.icon}.{1}?size={2}'.format(self, format, size)
@property
def text_channels(self):
return tuple(filter(lambda x: isinstance(x, TextChannel), self.channels))
@property
def voice_channels(self):
return tuple(filter(lambda x: isinstance(x, VoiceChannel), self.channels))
def add_member(self, member):
self.members[member.id] = member
``` |
{
"source": "4rr0w/twitterSIte",
"score": 3
} |
#### File: flask_app/analyser/serve.py
```python
import pandas as pd
class Serve:
def __init__(self, username, since, end) -> None:
since_day = pd.to_datetime(since)
end_day = pd.to_datetime(end)
self.df = pd.read_excel("./Sentimental/%s-tweets-analysed.xlsx" % username, engine= 'openpyxl')
self.df_filtered = self.df[(self.df['created_at'] >= since_day) & (self.df['created_at'] <= end_day)]
self.username = username
def get_sentiments(self)-> dict:
dict = {}
dict["neutral"] = self.df_filtered[self.df_filtered["polarity"] == 0].shape[0]
dict["weak_positive"] = self.df_filtered[(self.df_filtered["polarity"] > 0) & (self.df_filtered["polarity"] <= 0.3)].shape[0]
dict["positive"] = self.df_filtered[(self.df_filtered["polarity"] > 0.3) & (self.df_filtered["polarity"] <= 0.6)].shape[0]
dict["strong_positive"] = self.df_filtered[(self.df_filtered["polarity"] > 0.6) & (self.df_filtered["polarity"] <= 1)].shape[0]
dict["weak_negative"] = self.df_filtered[(self.df_filtered["polarity"] < 0) & (self.df_filtered["polarity"] >= -0.3)].shape[0]
dict["negative"] = self.df_filtered[(self.df_filtered["polarity"] < -0.3) & (self.df_filtered["polarity"] >= -0.6)].shape[0]
dict["strong_negative"] = self.df_filtered[(self.df_filtered["polarity"] < -0.6) & (self.df_filtered["polarity"] >= -1)].shape[0]
return dict
```
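A minimal usage sketch of `Serve` above (the handle, dates and import path are hypothetical); it assumes the analysed spreadsheet produced by the Twitter pipeline already exists under ./Sentimental/:

```python
from serve import Serve  # assumed import path

s = Serve("some_handle", "2021-01-01", "2021-03-31")
buckets = s.get_sentiments()
print(buckets["positive"], buckets["strong_negative"])
```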
#### File: flask_app/analyser/tweets.py
```python
import os
import tweepy
from huepy import * # refer to this https://github.com/s0md3v/huepy
import re
import time
import pandas as pd
from pandas import DataFrame
from mtranslate import translate
from textblob import TextBlob
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
class Twitter:
def __init__(self) -> None:
self.df_extracted = DataFrame()
self.df_translated = DataFrame()
self.df_filtered = DataFrame()
self.df_stopwords = DataFrame()
self.df_sentiments = DataFrame()
nltk.download("punkt")
nltk.download('stopwords')
def extract_tweets(self, username, since):
date_since = time.strptime(since, "%Y-%m-%d")
# Function to read env file for api keys
def get_api(path: str) -> dict:
with open(path, 'r') as f:
return dict(tuple(line.replace('\n', '').split('=')) for line in f.readlines() if not line.startswith('#'))
# this variable stores dict for all the keys/data that should not be uploaded to github publicly.
keys: dict = get_api(".env")
# set Oauth keys for tweepy
auth = tweepy.OAuthHandler(keys['API_key'], keys['API_secret'])
auth.set_access_token(keys['access_token'],
keys['access_token_secret'])
api = tweepy.API(auth, wait_on_rate_limit=True,
wait_on_rate_limit_notify=True)
# Get user's tweets object
tweets = api.user_timeline(screen_name=username,
# 200 is the maximum allowed count
count=200,
include_rts=False,
# Necessary to keep full_text
# otherwise only the first 140 words are extracted
tweet_mode='extended'
)
all_tweets = []
all_tweets.extend(tweets)
oldest_id = tweets[-1].id
try:
while True:
tweet_date = tweets[-1].created_at.strftime("%Y-%m-%d")
oldest_tweet_date = time.strptime(tweet_date, "%Y-%m-%d")
if(oldest_tweet_date < date_since):
break
tweets = api.user_timeline(screen_name=username,
# 200 is the maximum allowed count
count=200,
include_rts=False,
max_id=oldest_id - 1,
# Necessary to keep full_text
# otherwise only the first 140 words are extracted
tweet_mode='extended'
)
oldest_id = tweets[-1].id
# if len(tweets) == 0:
# break
all_tweets.extend(tweets)
except:
print(info("Tweet limit reached for user: %s" % username))
# transform the tweepy tweets into a 2D array that will populate the csv
outtweets = [[tweet.id_str,
tweet.created_at,
tweet.favorite_count,
tweet.retweet_count,
tweet.full_text.encode("utf-8").decode("utf-8")]
for idx, tweet in enumerate(all_tweets)]
self.df_extracted = DataFrame(outtweets, columns=[
"id", "created_at", "favorite_count", "retweet_count", "text"])
print(good('Successfully extracted raw tweets for %s' % username))
def translate_tweets(self, username):
self.df_translated = self.df_extracted
# concat multiple tweets for fast translations
merged_tweets_df = pd.DataFrame(self.df_extracted.groupby(
self.df_translated.index // 7)["text"].agg(" ENDOFTWEETS ".join))
list = []
for index, row in merged_tweets_df.iterrows():
list.append(translate(row.text.encode(
"utf-8").decode("utf-8"), "en"))
final_translated_tweets = []
for merged_tweets in list:
for tweet in merged_tweets.split(" ENDOFTWEETS "):
final_translated_tweets.append(tweet)
self.df_translated["translated-text"] = pd.Series(
final_translated_tweets)
print(good('Successfully translated tweets for %s' % username))
def filter_tweets(self, username):
self.df_filtered = self.df_translated
# concat multiple tweets for fast cleaning
list = []
for index, row in self.df_filtered.iterrows():
# remove @usernames and special characters(*,!,.,?)
clean_text = ' '.join(re.sub(
"(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w +:\ / \ / \S +)", " ", str(row["translated-text"])).split())
# remove urls and links
clean_text = re.sub(
'https?://[A-Za-z0-9./]+', '', str(clean_text))
clean_text = clean_text.lower()
# remove #xyz to xyz
clean_text = re.sub("[^a-zA-Z]", " ", str(clean_text))
list.append(clean_text)
self.df_filtered["filtered-text"] = pd.Series(list)
print(good('Successfully filtered tweets for %s' % username))
def remove_stopwords(self, username):
self.df_stopwords = self.df_filtered
# concat multiple tweets for fast cleaning
list = []
stop_words = stopwords.words("english")
stop_words.extend(["the", "in", "jii", "ji", "shri", "shree", "mati", "https", "co", "com"])
for index, row in self.df_stopwords.iterrows():
# remove @usernames and special characters(*,!,.,?)
text = str(row["filtered-text"])
text_tokens = word_tokenize(text)
tokens_without_sw = [word for word in text_tokens if not word in stop_words]
filtered_sentence = (" ").join(tokens_without_sw)
list.append(filtered_sentence)
self.df_stopwords["no-stopword-text"] = pd.Series(list)
print(good('Successfully removed stopwords tweets for %s' % username))
def analyse_sentiments(self, username):
self.df_sentiments = self.df_stopwords
sentiments = DataFrame()
sentiments[['polarity', 'subjectivity']] = self.df_filtered['filtered-text'].apply(
lambda Text: pd.Series(TextBlob(str(Text)).sentiment))
self.df_sentiments[['polarity', 'subjectivity']] = sentiments
if not os.path.exists("Sentimental"):
os.makedirs("Sentimental")
# output new excel file
self.df_sentiments.to_excel(
"./Sentimental/%s-tweets-analysed.xlsx" % username, engine='openpyxl')
print(good('Successfully written analysed tweets to: "./Sentimental/' +
'%s-tweets-analysed.xls"' % username))
def main(self, username: str, start_date):
self.extract_tweets(username, start_date)
self.translate_tweets(username)
self.filter_tweets(username)
self.remove_stopwords(username)
self.analyse_sentiments(username)
```
#### File: twitterSIte/flask_app/app.py
```python
from main import api_wordcloud
from flask import Flask, request, jsonify
from flask_cors import cross_origin
from main import api_admin, api_client
import traceback
app = Flask(__name__)
# app.debug = True
@app.route('/')
@cross_origin()
def home():
return "home"
@app.route('/admin')
@cross_origin()
def admin():
username = request.args.get('username')
start = request.args.get('start')
try:
api_admin(username, start)
return "Success"
except:
return traceback.format_exc()
@app.route('/client')
@cross_origin()
def client():
username = request.args.get('username')
start = request.args.get('start')
end = request.args.get('end')
try:
sentiments = api_client(username, start, end)
return jsonify(sentiments)
except:
return traceback.format_exc()
@app.route('/wordcloud')
@cross_origin()
def wordcloud():
username = request.args.get('username')
start = request.args.get('start')
end = request.args.get('end')
try:
count = api_wordcloud(username, start, end)
return jsonify(count)
except:
return traceback.format_exc()
if __name__ == '__main__':
app.run(host='0.0.0.0', debug = True)
``` |
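For illustration, the three routes above can be exercised with plain HTTP requests; the host, port and handle below are assumptions (Flask defaults to port 5000 unless app.run is given another one):

```python
import requests

BASE = "http://localhost:5000"  # assumed host/port

# trigger extraction + sentiment analysis for a handle
requests.get(f"{BASE}/admin", params={"username": "some_handle", "start": "2021-01-01"})

# fetch the aggregated sentiment buckets for a date range
resp = requests.get(f"{BASE}/client",
                    params={"username": "some_handle", "start": "2021-01-01", "end": "2021-03-31"})
print(resp.json())
```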
{
"source": "4rtemi5/imax",
"score": 2
} |
#### File: imax/tests/test_color_transforms.py
```python
from imax import color_transforms
from jax import numpy as jnp
from utils import compare
im1 = jnp.ones((3, 3, 3), dtype='uint8') * 255
im2 = jnp.zeros((3, 3, 3), dtype='uint8')
mask = jnp.array([
[[0], [0], [0]],
[[0], [1], [0]],
[[0], [0], [0]]],
dtype='bool')
def test_blend():
factor = jnp.array(0.55 * 255, dtype='uint8')
inputs = [im1, im2]
targets = (im2 * factor).astype('uint8')
outputs = color_transforms.blend(inputs[0], inputs[1], factor)
compare(inputs, targets, outputs)
def test_cutout():
inputs = im2
targets = mask.astype('uint8') * 42
outputs = color_transforms.cutout(inputs, mask, replace=42)
compare(inputs, targets, outputs)
def test_solarize():
inputs = im1
targets = im2
outputs = color_transforms.solarize(inputs, threshold=128)
compare(inputs, targets, outputs)
def test_solarize_add():
inputs = im1 * mask
targets = jnp.ones_like(im1) * 245 * mask + 10
outputs = color_transforms.solarize_add(inputs, addition=10, threshold=128)
compare(inputs, targets, outputs)
def test_gray_to_rgb_to_gray():
inputs = jnp.ones_like(im1)
``` |
{
"source": "4rtem/RushTI",
"score": 2
} |
#### File: 4rtem/RushTI/RushTI.py
```python
import asyncio
import configparser
import logging
import os
import shlex
import sys
from base64 import b64decode
from concurrent.futures import ThreadPoolExecutor
from TM1py import TM1Service
APPNAME = "RushTI"
LOGFILE = "{current_directory}/RushTI.log".format(current_directory=sys.path[0])
CONFIG = "{current_directory}/config.ini".format(current_directory=sys.path[0])
logging.basicConfig(
filename=LOGFILE,
format='%(asctime)s - RushTI - %(levelname)s - %(message)s',
level=logging.INFO)
def setup_tm1_services():
""" Return Dictionary with TM1ServerName (as in config.ini) : Instantiated TM1Service
:return: Dictionary server_names and TM1py.TM1Service instances pairs
"""
if not os.path.isfile(CONFIG):
raise ValueError("config.ini does not exist.")
tm1_services = dict()
# parse .ini
config = configparser.ConfigParser()
config.read(CONFIG)
# build tm1_services dictionary
for tm1_server_name, params in config.items():
# handle default values from configparser
if tm1_server_name != config.default_section:
try:
params["password"] = <PASSWORD>(params["password"])
tm1_services[tm1_server_name] = TM1Service(**params, session_context=APPNAME)
# Instance not running, Firewall or wrong connection parameters
except Exception as e:
logging.error("TM1 instance {} not accessible. Error: {}".format(tm1_server_name, str(e)))
return tm1_services
def decrypt_password(encrypted_password):
""" b64 decoding
:param encrypted_password: encrypted password with b64
:return: password in plain text
"""
return b64decode(encrypted_password).decode("UTF-8")
def extract_info_from_line(line):
""" Translate one line from txt file into arguments for execution: instance, process, parameters
:param line: Arguments for execution. E.g. instance="tm1srv01" process="Bedrock.Server.Wait" pWaitSec=2
:return: instance_name, process_name, parameters
"""
parameters = {}
for pair in shlex.split(line):
param, value = pair.split("=")
# if instance or process needs to be case insensitive
if param.lower() == 'process' or param.lower() == 'instance':
parameters[param.lower()] = value.strip('"').strip()
# parameters (e.g. pWaitSec) are case sensitive in TM1 REST API !
else:
parameters[param] = value.strip('"').strip()
instance_name = parameters.pop("instance")
process_name = parameters.pop("process")
return instance_name, process_name, parameters
def execute_line(line, tm1_services):
""" Execute one line from the txt file
:param line:
:param tm1_services:
:return:
"""
if len(line.strip()) == 0:
return
instance_name, process_name, parameters = extract_info_from_line(line)
if instance_name not in tm1_services:
msg = "Process {process_name} not executed on {instance_name}. {instance_name} not accessible.".format(
process_name=process_name,
instance_name=instance_name)
logging.error(msg)
return
tm1 = tm1_services[instance_name]
# Execute it
try:
msg = "Executing process: {process_name} with Parameters: {parameters} on instance: {instance_name}".format(
process_name=process_name,
parameters=parameters,
instance_name=instance_name)
logging.info(msg)
tm1.processes.execute(process_name=process_name, **parameters)
msg = "Execution Successful: {process_name} with Parameters: {parameters} on instance: {instance_name}".format(
process_name=process_name,
parameters=parameters,
instance_name=instance_name)
logging.info(msg)
except Exception as e:
msg = "Execution Failed. Process: {process}, Parameters: {parameters}, Error: {error}".format(
process=process_name,
parameters=parameters,
error=str(e))
logging.error(msg)
async def work_through_tasks(path, max_workers, tm1_services):
""" loop through file. Add all lines to the execution queue.
:param path:
:param max_workers:
:param tm1_services:
:return:
"""
with open(path) as file:
lines = file.readlines()
loop = asyncio.get_event_loop()
with ThreadPoolExecutor(max_workers=int(max_workers)) as executor:
futures = [loop.run_in_executor(executor, execute_line, line, tm1_services)
for line
in lines]
for future in futures:
await future
def logout(tm1_services):
""" logout from all instances
:param tm1_services:
:return:
"""
for tm1 in tm1_services.values():
tm1.logout()
def translate_cmd_arguments(*args):
""" Translation and Validity-checks for command line arguments.
:param args:
:return: path_to_file and max_workers
"""
# too few arguments
if len(args) < 3:
msg = "RushTI needs to executed with two arguments."
logging.error(msg)
raise ValueError(msg)
# txt file doesn't exist
if not os.path.isfile(args[1]):
msg = "Argument 1 (path to file) invalid. File needs to exist."
logging.error(msg)
raise ValueError(msg)
# max_workers is not a number
if not args[2].isdigit():
msg = "Argument 2 (max workers) invalid. Needs to be number."
logging.error(msg)
raise ValueError(msg)
return args[1], args[2]
# receives two arguments: 1) path-to-txt-file, 2) max-workers
if __name__ == "__main__":
logging.info("{app_name} starts. Parameters: {parameters}.".format(
app_name=APPNAME,
parameters=sys.argv))
# read commandline arguments
path_to_file, max_workers = translate_cmd_arguments(*sys.argv)
# setup connections
tm1_services = setup_tm1_services()
# execution
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(work_through_tasks(path_to_file, max_workers, tm1_services))
finally:
logout(tm1_services)
loop.close()
logging.info("{app_name} ends".format(app_name=APPNAME))
``` |
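As a rough sketch of the per-line task format consumed by extract_info_from_line above (the instance, process and parameter values are hypothetical), each line is tokenised with shlex and split into key/value pairs before instance and process are popped out:

```python
import shlex

line = 'instance="tm1srv01" process="Bedrock.Server.Wait" pWaitSec=5'  # hypothetical task line

pairs = dict(pair.split("=", 1) for pair in shlex.split(line))
print(pairs)  # {'instance': 'tm1srv01', 'process': 'Bedrock.Server.Wait', 'pWaitSec': '5'}
```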
{
"source": "4rund3v/banix",
"score": 3
} |
#### File: src/email/email_client.py
```python
import os
import sys
import smtplib
from email.mime.text import MIMEText
import bottle
from bottle import run, route, error
import json
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
from email.mime.image import MIMEImage
from email.mime.audio import MIMEAudio
import mimetypes
from email_template import generate_email_template
from email_config import SENDER_EMAIL_ID, SENDER_EMAIL_PASSWORD
class EmailClient:
def __init__(self, smtp_server="smtp.gmail.com", smtp_port=587,\
senders_email=None, senders_password=None, raise_exception=False):
self.smtp_host = smtp_server
self.smtp_port = smtp_port
self.email_id = senders_email
self.password = senders_password
self.logged_in = False
self.raise_exception = raise_exception
self.server = None
def connect(self):
try:
self.server = smtplib.SMTP(self.smtp_host, self.smtp_port,None,30)
self.server.starttls()
return True
except Exception as ex:
print("EmailClient.connect: Exception(: %s) while connecting to smtp server." %str(ex))
if self.raise_exception:
raise Exception(ex)
return False
def close(self):
try:
self.server.close()
return True
except Exception as ex:
print("EmailClient.close: Exception(: %s) while closing smtp server connection." %str(ex))
if self.raise_exception:
raise Exception(ex)
return False
def login(self, senders_email=None, senders_password=None):
try:
if not self.connect():
return False
email = senders_email if senders_email else self.email_id
password = senders_password if senders_password else self.password
self.server.login(email, password)
self.logged_in = True
return True
except Exception as ex:
print("EmailClient.login: Exception(: %s) in login" %str(ex))
if self.raise_exception:
raise Exception(ex)
return False
def logout(self):
try:
if self.logged_in:
self.server.quit()
self.close()
return True
except Exception as ex:
print("EmailClient.logout: Exception(: %s) in logout" %str(ex))
if self.raise_exception:
raise Exception(ex)
self.logged_in = False
return False
def sendEmail(self, recipients, subject, body, files =[], logout=True):
status = "failed"
try:
assert type(files)==list
if self.email_id and self.password:
self.login()
else:
raise Exception("Login failed; senders email and password not provided.")
recipients_str = ""
if isinstance(recipients, list):
recipients_str = ", ".join(recipients)
elif isinstance(recipients, str):
recipients_str = recipients
recipients = [recipients]
#mail = MIMEText(body)
mail = MIMEMultipart()
mail["Subject"] = subject
mail["From"] = self.email_id
mail["To"] = recipients_str
mail.attach(MIMEText(body,'html'))
self.add_attachments_to_mail(mail,files)
self.server.sendmail(self.email_id, recipients, mail.as_string())
status = "success"
except Exception as ex:
print("EmailClient.sendEmail: Failed sending email due to Exception(: %s)" %str(ex))
if self.raise_exception:
raise Exception(ex)
self.logout()
return status
def add_attachments_to_mail(self,mail,file_path=[]):
for _file in file_path:
if not os.path.isfile(_file):
continue
ctype, encoding = mimetypes.guess_type(_file)
if ctype is None or encoding is not None:
ctype = 'application/octet-stream'
maintype, subtype = ctype.split('/', 1)
if maintype == 'text':
fp = open(_file)
# Note: we should handle calculating the charset
msg = MIMEText(fp.read(), _subtype=subtype)
fp.close()
elif maintype == 'image':
fp = open(_file, 'rb')
msg = MIMEImage(fp.read(), _subtype=subtype)
fp.close()
elif maintype == 'audio':
fp = open(_file, 'rb')
msg = MIMEAudio(fp.read(), _subtype=subtype)
fp.close()
else:
fp = open(_file, 'rb')
msg = MIMEBase(maintype, subtype)
msg.set_payload(fp.read())
fp.close()
# Encode the payload using Base64
encoders.encode_base64(msg)
filename = os.path.basename(_file)
msg.add_header('Content-Disposition', 'attachment', filename=filename)
mail.attach(msg)
return mail
@route("/send_email", method="POST")
def send_email():
#Input: {"username":"Arun", "email_id": "<EMAIL>"}
info = json.load(bottle.request.body)
print("[send_email] Info: {}".format(info))
mail_man = EmailClient(senders_email = SENDER_EMAIL_ID, senders_password = SENDER_EMAIL_PASSWORD, raise_exception=True)
mail_body = generate_email_template(info["username"])
try:
mail_status = mail_man.sendEmail([info["email_id"]], "your order placed", mail_body, logout=True)
print("[send_email] mail status: {}".format(mail_status))
except Exception as ex:
print("[send_email] Exception is {}".format(ex))
if __name__ == '__main__':
run(host='0.0.0.0', port=7702, server='twisted')
```
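A minimal usage sketch of the EmailClient class above; the module path, addresses and credentials are placeholders, not values from the original project:

```python
from email_client import EmailClient  # assumed import path

mail_man = EmailClient(senders_email="sender@example.com",
                       senders_password="app-password",
                       raise_exception=True)
status = mail_man.sendEmail(["recipient@example.com"],
                            "your order placed",
                            "<p>Your banix order is placed successfully.</p>")
print(status)  # "success" or "failed"
```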
#### File: src/email/email_template.py
```python
def generate_email_template(username):
"""
Prepared HTML Email template
returns html_template
"""
return '''<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="x-ua-compatible" content="ie=edge">
<title>banix order confirmation</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<style type="text/css">
/**
* Google webfonts. Recommended to include the .woff version for cross-client compatibility.
*/
@media screen {
@font-face {
font-family: 'Source Sans Pro';
font-style: normal;
font-weight: 400;
src: local('Source Sans Pro Regular'), local('SourceSansPro-Regular');
}
@font-face {
font-family: 'Source Sans Pro';
font-style: normal;
font-weight: 700;
src: local('Source Sans Pro Bold'), local('SourceSansPro-Bold');
}
}
/**
* Remove blue links for iOS devices.
*/
a[x-apple-data-detectors] {
font-family: inherit !important;
font-size: inherit !important;
font-weight: inherit !important;
line-height: inherit !important;
color: inherit !important;
text-decoration: none !important;
}
/**
* Fix centering issues in Android 4.4.
*/
div[style*="margin: 16px 0;"] {
margin: 0 !important;
}
body {
width: 100% !important;
height: 100% !important;
padding: 0 !important;
margin: 0 !important;
}
a {
color: #1a82e2;
}
</style>
</head>
<body style="background-color: #e9ecef;">
<p>Dear '''+username+''',</p>
<p>
Greetings of the day,<br>
Your banix order is placed successfully.
Our team is working round the clock to deliver your order at the earliest.<br>
For any clarification contact <EMAIL>
</p>
<p>Thanks<br>Team banix</p>
</body>
</html>
'''
```
#### File: src/models/cart.py
```python
from src.models import Base
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
class Cart(Base):
__tablename__ = "cart"
cart_id = Column(Integer, primary_key=True, autoincrement=True)
customer_id = Column(Integer, ForeignKey('customers.customer_id'))
cart_items = relationship("CartItem", backref="cart_item")
def __repr__(self):
return f"""<Cart(cart_id={self.cart_id}) email_id={self.email_id}>"""
def to_dict(self):
cart_items = []
if self.cart_items:
cart_items = [cart_item.to_dict() for cart_item in self.cart_items]
return dict(cart_id=self.cart_id,
customer_id=self.customer_id,
cart_items = cart_items
)
class CartItem(Base):
__tablename__ = "cart_items"
cart_item_id = Column(Integer, primary_key=True, autoincrement=True)
cart_id = Column(Integer, ForeignKey('cart.cart_id'))
cart_item_qty = Column(Integer, nullable=False)
product_id = Column(Integer, ForeignKey('products.product_id'))
def __repr__(self):
return f"""<CartItem(cart_item_id={self.cart_item_id}) cart_item_qty={self.cart_item_qty} product_id={self.product_id}>"""
def to_dict(self):
return dict(cart_item_id=self.cart_item_id,
cart_item_qty=self.cart_item_qty,
product_id = self.product_id
)
```
#### File: src/models/orders.py
```python
from src.models import Base
from sqlalchemy import Column, Integer, String, ForeignKey, DateTime
from sqlalchemy.orm import relationship
from datetime import datetime
class Orders(Base):
__tablename__ = "orders"
order_id = Column(Integer, primary_key=True, autoincrement=True)
order_customer_id = Column(Integer, ForeignKey("customers.customer_id"), nullable=False)
order_info_id = Column(String(100), unique=True, nullable=False)
order_date = Column(String(30), nullable=False)
order_created_datetime = Column(DateTime(timezone=True), default=datetime.now)
order_updated_datetime = Column(DateTime(timezone=True), default=datetime.now, onupdate=datetime.now)
order_price = relationship("OrderPrice", uselist=False, cascade="all, delete, delete-orphan")
order_status = relationship("OrderStatus", uselist=False, cascade="all, delete, delete-orphan")
payment_info = relationship("PaymentInfo", uselist=False, back_populates="orders")
order_items = relationship("OrderItem", cascade="all, delete, delete-orphan")
order_shipping_address = relationship("Address", uselist=False,cascade="all, delete, delete-orphan")
order_shipping_info = relationship("OrderShippingInfo", uselist=False, cascade="all, delete, delete-orphan")
def __repr__(self):
return f"""<Orders order_id{self.order_id} order_date{self.order_date}>"""
def to_dict(self):
order_price = {}
if self.order_price:
order_price = self.order_price.to_dict()
order_status = {}
if self.order_status:
order_status = self.order_status.to_dict()
payment_info = {}
if self.payment_info:
payment_info = self.payment_info.to_dict()
order_items = []
if self.order_items:
for order_item in self.order_items:
order_items.append(order_item.to_dict())
order_shipping_address = {}
if self.order_shipping_address:
order_shipping_address = self.order_shipping_address.to_dict()
order_shipping_info = {}
if self.order_shipping_info:
order_shipping_info = self.order_shipping_info.to_dict()
return dict(order_id=self.order_id,
order_customer_id=self.order_customer_id,
order_info_id=self.order_info_id,
payment_info=payment_info,
order_items=order_items,
order_date=self.order_date,
order_price=order_price,
order_shipping_address=order_shipping_address,
order_shipping_info=order_shipping_info,
order_status=order_status,
order_created_datetime=str(self.order_created_datetime),
order_updated_datetime=str(self.order_updated_datetime),
)
class OrderPrice(Base):
__tablename__ = "order_price"
order_price_id = Column(Integer, primary_key=True, autoincrement=True)
order_foreign_id = Column(Integer, ForeignKey("orders.order_id"), nullable=False)
orders = relationship("Orders", back_populates="order_price")
order_total_price = Column(Integer, nullable=False)
order_shipping_price = Column(Integer, nullable=False)
order_tax_price = Column(Integer, nullable=False)
order_selling_price = Column(Integer, nullable=False)
def to_dict(self):
return dict(
order_price_id=self.order_price_id,
order_foreign_id=self.order_foreign_id,
order_total_price=self.order_total_price,
order_shipping_price=self.order_shipping_price,
order_tax_price=self.order_tax_price,
order_selling_price=self.order_selling_price,
)
class OrderStatus(Base):
__tablename__ = "order_status"
order_status_id = Column(Integer, primary_key=True, autoincrement=True)
order_foreign_id = Column(Integer, ForeignKey("orders.order_id"), nullable=False)
orders = relationship("Orders", back_populates="order_status")
status = Column(String(30), nullable=False)
def to_dict(self):
return dict(order_status_id=self.order_status_id,
status=self.status)
class OrderItem(Base):
__tablename__ = "order_items"
order_item_id = Column(Integer, primary_key=True, autoincrement=True)
order_foreign_id = Column(Integer, ForeignKey("orders.order_id"), nullable=False)
orders = relationship("Orders", back_populates="order_items")
order_product_foreign_id = Column(Integer, ForeignKey("products.product_id"), nullable=False)
order_item_product_name = Column(String(300), nullable=False)
order_item_total_price = Column(Integer, nullable=False)
order_item_selling_price = Column(Integer, nullable=False)
order_item_shipping_price = Column(Integer, nullable=False)
order_item_tax_price = Column(Integer, nullable=False)
order_item_quantity = Column(Integer, nullable=False)
def __repr__(self):
return f"""<OrderItem order_item_id{self.order_item_id} order foreign key {self.order_foreign_id}> order_item_name {self.order_item_product_name}"""
def to_dict(self):
return dict(order_item_id=self.order_item_id,
order_item_product_name=self.order_item_product_name,
order_foreign_id=self.order_foreign_id,
order_product_foreign_id=self.order_product_foreign_id,
order_item_total_price=self.order_item_total_price,
order_item_selling_price=self.order_item_selling_price,
order_item_shipping_price=self.order_item_shipping_price,
order_item_tax_price=self.order_item_tax_price,
order_item_quantity=self.order_item_quantity)
class OrderShippingInfo(Base):
__tablename__ = "order_shipping_info"
order_item_shipping_info_id = Column(Integer, primary_key=True, autoincrement=True)
order_shipping_status = Column(String, nullable=False)
order_foreign_id = Column(Integer, ForeignKey("orders.order_id"), nullable=False)
shipping_info_created_datetime = Column(DateTime(), default=datetime.now)
shipping_info_updated_datetime = Column(DateTime(), default=datetime.now, onupdate=datetime.now)
def __repr__(self):
return f"""<OrderShippingInfo order_item_shipping_info_id{self.order_item_shipping_info_id} order foreign key {self.order_foreign_id}>"""
def to_dict(self):
return dict(
order_item_shipping_info_id=self.order_item_shipping_info_id,
order_shipping_status=self.order_shipping_status,
order_foreign_id=self.order_foreign_id,
shipping_info_created_datetime=str(self.shipping_info_created_datetime),
shipping_info_updated_datetime=str(self.shipping_info_updated_datetime)
)
```
#### File: src/shiprocket/client.py
```python
from configuration import shiprocket_username, shiprocket_password
from src.shiprocket import constants as shiprocket_consts
from requests import session
import json
import uuid
class ShiprocketClient():
"""
Client to handle the communication with the shiprocket apis.
"""
def __init__(self):
self.active_session = None
self._authorization_post_data = json.dumps({
"email": shiprocket_username,
"password": <PASSWORD>
})
pass
def connect(self):
"""
Creates a connection to the shiprocket server.
"""
try:
_session = session()
_session.headers.update({'Content-Type': 'application/json'})
resp = _session.post(shiprocket_consts.AUTHENTICATION_API,
data=self._authorization_post_data)
resp.raise_for_status()
resp_data = resp.json()
_session.headers.update({shiprocket_consts.AUTHENTICATION_HEADER_NAME: "{} {}".format(shiprocket_consts.AUTHENTICATION_TOKEN_PREFIX, resp_data["token"]) })
print(f"[ShiprocketClient][connect] Active Session Headers :: {_session.headers}")
self.active_session = _session
return True
except Exception as ex:
print(f"[ShiprocketClient][connect] Unable to connect to the shiprocket client : {ex}")
return False
def check_serviceability(self, product_weight:int, src_pin_code: int, dst_pin_code: int):
"""
Check the serviceablity of the courier to the given pin code.
"""
serviceability = {
"estimated_delivery_days": -1
}
try:
self.active_session.headers.update({'Content-Type': 'application/json'})
query_data = json.dumps({"weight": product_weight, "cod": 0,
"pickup_postcode": src_pin_code,
"delivery_postcode": dst_pin_code })
print(f"[ShiprocketClient][check_serviceability] Data prepared is : {query_data}")
resp = self.active_session.get(shiprocket_consts.COURIER_SERVICEABILITY_URL, data=query_data)
resp.raise_for_status()
if resp.status_code == 404:
serviceability["estimated_delivery_days"] = -1
else:
resp_data = resp.json()
# print(f"[ShiprocketClient][check_serviceability] The serviceablity response is :: {resp_data}")
recommended_option = resp_data["data"]["available_courier_companies"][0]
serviceability["estimated_delivery_days"] = recommended_option["estimated_delivery_days"]
serviceability["courier_name"] = recommended_option["courier_name"]
serviceability["courier_company_id"] = recommended_option["courier_company_id"]
serviceability["rate"] = recommended_option["rate"]
serviceability["dst_pin_code"] = dst_pin_code
serviceability["serviceability_id"] = str(uuid.uuid4())
print(f"[ShiprocketClient][check_serviceability] The serviceability info prepared is : {serviceability}")
except Exception as ex:
print(f"[ShiprocketClient][check_serviceability] Unable to fetch the serviceablity : {ex}")
return serviceability
def check_delivery_cost(self, product_weight:int, src_pin_code: int, dst_pin_code: int):
delivery_cost = -1
try:
self.active_session.headers.update({'Content-Type': 'application/json'})
query_data = json.dumps({"weight": product_weight, "cod": 0,
"pickup_postcode": src_pin_code,
"delivery_postcode": dst_pin_code })
print(f"[ShiprocketClient][check_delivery_cost] Data prepared is : {query_data}")
resp = self.active_session.get(shiprocket_consts.COURIER_SERVICEABILITY_URL, data=query_data)
resp.raise_for_status()
if resp.status_code == 404:
serviceability["estimated_delivery_days"] = -1
else:
resp_data = resp.json()
# print(f"[ShiprocketClient][check_delivery_cost] The serviceablity response is :: {resp_data}")
recommended_option = resp_data["data"]["available_courier_companies"][0]
delivery_cost = recommended_option["rate"]
print(f"[ShiprocketClient][check_delivery_cost] The delivery cost info is : {delivery_cost}")
except Exception as ex:
print(f"[ShiprocketClient][check_delivery_cost] Unable to fetch the delivery cost : {ex}")
return delivery_cost
```
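A minimal usage sketch of ShiprocketClient above; the pin codes and weight are hypothetical, and it assumes valid credentials are present in the configuration module imported at the top of the file:

```python
from src.shiprocket.client import ShiprocketClient  # path taken from the file header above

client = ShiprocketClient()
if client.connect():
    info = client.check_serviceability(product_weight=1,
                                       src_pin_code=560001,
                                       dst_pin_code=110001)
    print(info.get("courier_name"), info.get("estimated_delivery_days"), info.get("rate"))
```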
#### File: banix_server/src/utils.py
```python
import datetime
from src.constants import TIMESTAMP_FORMAT
def json_friendly(obj):
if not obj or type(obj) in (int, float, str):
return obj
if type(obj) == datetime.datetime:
return obj.strftime("%Y-%m-%d %H:%M:%S")
if type(obj) == dict:
for k in obj:
obj[k] = json_friendly(obj[k])
return obj
if type(obj) == list:
for i, v in enumerate(obj):
obj[i] = json_friendly(v)
return obj
if type(obj) == tuple:
temp = []
for v in obj:
temp.append(json_friendly(v))
return tuple(temp)
return str(obj)
``` |
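For illustration, a small call to json_friendly defined above (the payload is hypothetical); datetimes are rendered as strings and containers are walked recursively:

```python
import datetime
from src.utils import json_friendly  # path taken from the file header above

payload = {"created_at": datetime.datetime(2021, 5, 1, 12, 30, 0),
           "items": [{"qty": 2, "price": 199.0}]}
print(json_friendly(payload))
# {'created_at': '2021-05-01 12:30:00', 'items': [{'qty': 2, 'price': 199.0}]}
```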
{
"source": "4rzael/krocs",
"score": 3
} |
#### File: krocs/utils/frames.py
```python
from PyQt5.QtWidgets import QFrame, QLabel
""" Proprietary imports. """
from .colors import valid_color, intermediate_color, critical_color
class SunkenHLine(QFrame):
def __init__(self):
super(SunkenHLine, self).__init__()
self.setFrameShape(QFrame.HLine)
self.setFrameShadow(QFrame.Sunken)
class StatusLabel(QLabel):
def __init__(self, text):
super(QLabel, self).__init__()
self.setText(text)
self.setAutoFillBackground(True)
self.setStyleSheet(
"QLabel { margin-left: 5px; border-radius: 5px; " +
"background-color: %s; }" % critical_color)
def setValid(self):
self.setStyleSheet(
"QLabel { margin-left: 5px; border-radius: 5px; " +
"background-color: %s; }" % valid_color)
def setIntermediate(self):
self.setStyleSheet(
"QLabel { margin-left: 5px; border-radius: 5px; " +
"background-color: %s; }" % intermediate_color)
def setCritical(self):
self.setStyleSheet(
"QLabel { margin-left: 5px; border-radius: 5px; " +
"background-color: %s; }" % critical_color)
```
#### File: krocs/widgets/connection_window.py
```python
from PyQt5.QtCore import QRegExp, Qt
from PyQt5.QtGui import QRegExpValidator, QValidator
from PyQt5.QtWidgets import (QWidget, QLabel, QLineEdit, QPushButton,
QFormLayout)
""" Proprietary imports. """
from utils import (SunkenHLine, StatusLabel, valid_color, intermediate_color,
critical_color)
class ConnectionWindow(QWidget):
""" Connection to kRPC server window."""
def __init__(self, conn):
super(ConnectionWindow, self).__init__()
""" Set window modality """
self.setWindowModality(Qt.WindowModal)
""" Connection object as attribute. """
self.conn = conn
self.conn.synced.connect(self._conn_synced)
self.conn.unsynced.connect(self._conn_unsynced)
""" Connection form attributes definitions. """
name_lbl = QLabel("Connection name:")
self.name = QLineEdit("Default")
name_regexp = QRegExp("^[\w\-\s]+$")
name_validator = QRegExpValidator(name_regexp)
self.name.setValidator(name_validator)
self.name.textChanged.connect(self.validate_form)
addr_lbl = QLabel("Server address:")
self.addr = QLineEdit("127.0.0.1")
ipv4_regexp = QRegExp("((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}" +
"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$")
ipv4_validator = QRegExpValidator(ipv4_regexp)
self.addr.setValidator(ipv4_validator)
self.addr.textChanged.connect(self.validate_form)
rpc_port_lbl = QLabel("RPC port:")
self.rpc_port = QLineEdit("50000")
tcp_regexp = QRegExp("([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65" +
"[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])")
tcp_validator = QRegExpValidator(tcp_regexp)
self.rpc_port.setValidator(tcp_validator)
self.rpc_port.textChanged.connect(self.validate_form)
stream_port_lbl = QLabel("Stream port:")
self.stream_port = QLineEdit("50001")
self.stream_port.setValidator(tcp_validator)
self.stream_port.textChanged.connect(self.validate_form)
self.sync_btn = QPushButton("Connect")
self.sync_btn.setDefault(True)
self.sync_btn.clicked.connect(lambda:self._request_sync())
status_lbl = QLabel("Connection status:")
self.status = StatusLabel("Not connected.")
self.unsync_btn = QPushButton("Disconnect")
if self.conn.connected == False:
self.unsync_btn.setEnabled(False)
self.unsync_btn.clicked.connect(lambda:self._request_unsync())
""" Connection form layout definition. """
fbox = QFormLayout()
fbox.addRow(name_lbl, self.name)
fbox.addRow(addr_lbl, self.addr)
fbox.addRow(rpc_port_lbl, self.rpc_port)
fbox.addRow(stream_port_lbl, self.stream_port)
fbox.addRow(self.sync_btn)
fbox.addRow(SunkenHLine())
fbox.addRow(status_lbl, self.status)
fbox.addRow(self.unsync_btn)
""" Connection window layout definition. """
self.setLayout(fbox)
self.setMinimumSize(400, 300)
self.setMaximumSize(400, 300)
self.setWindowTitle("Connection to kRPC server")
""" Signals emission to perform first validation fullfilling decoration
purposes """
self.name.textChanged.emit(self.name.text())
self.addr.textChanged.emit(self.addr.text())
self.rpc_port.textChanged.emit(self.rpc_port.text())
self.stream_port.textChanged.emit(self.stream_port.text())
def validate_form(self):
""" form validation for each attributes, changing attributes background
color and connection button enabled property. """
sender = self.sender()
validator = sender.validator()
state = validator.validate(sender.text(), 0)[0]
if state == QValidator.Acceptable:
color = valid_color
elif state == QValidator.Intermediate:
color = intermediate_color
else:
color = critical_color
sender.setStyleSheet("QLineEdit { background-color: %s }" % color)
name_validator = self.name.validator()
addr_validator = self.addr.validator()
rpc_port_validator = self.rpc_port.validator()
stream_port_validator = self.stream_port.validator()
name_state = name_validator.validate(self.name.text(), 0)[0]
addr_state = addr_validator.validate(self.addr.text(), 0)[0]
rpc_port_state = rpc_port_validator.validate(self.rpc_port.text(), 0)[0]
stream_port_state = stream_port_validator.validate(
self.stream_port.text(), 0)[0]
if (name_state == QValidator.Acceptable and
addr_state == QValidator.Acceptable and
rpc_port_state == QValidator.Acceptable and
stream_port_state == QValidator.Acceptable):
self.sync_btn.setEnabled(True)
else:
self.sync_btn.setEnabled(False)
def _request_sync(self):
""" Requesting a connection object from a kRPC server. """
self.conn.sync(self.name.text(), self.addr.text(),
int(self.rpc_port.text()), int(self.stream_port.text()))
def _request_unsync(self):
""" Terminating a connection. """
self.conn.unsync()
self.unsync_btn = QPushButton("Close connection")
def _conn_synced(self):
""" Updating view if new connection. """
self.status.setText("Connected to %s." % self.conn.addr)
self.status.setValid()
self.unsync_btn.setEnabled(True)
def _conn_unsynced(self):
""" Updating view if closed connection. """
self.status.setText("Not connected.")
self.status.setCritical()
self.unsync_btn.setEnabled(False)
```
#### File: widgets/main_window/vessels_menu.py
```python
from ..vessel_window import VesselWindow
from PyQt5.QtWidgets import QMainWindow
""" "Vessels" menu handler in main window """
class VesselsMenu(object):
def __init__(self, window, vessels, bar):
self.window = window
self.vessels = vessels
self.bar = bar
self.menu = self.bar.addMenu('Vessels')
self.vessel_windows = {}
""" Updating menu content """
def vessels_updated(vessels):
self.menu.clear()
for vessel in vessels:
action = self.menu.addAction(vessel.name)
action.setCheckable(True)
# bind the loop variables as defaults so each action keeps its own vessel
action.toggled.connect(lambda *args, action=action, vessel=vessel: self.on_vessel_click(action, vessel))
self.vessels.updated.connect(vessels_updated)
def on_vessel_click(self, action, vessel):
if vessel in self.vessel_windows.keys():
self.on_vessel_deselection(action, vessel)
else:
self.on_vessel_selection(action, vessel)
def on_vessel_selection(self, action, vessel):
self.vessel_windows[vessel] = VesselWindow(
self.window.conn,
self.window.objects,
vessel,
self.window
)
self.vessel_windows[vessel].show()
def on_vessel_deselection(self, action, vessel):
self.window.mdi.removeSubWindow(self.vessel_windows[vessel])
del self.vessel_windows[vessel]
``` |
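The per-vessel `toggled` connection created in the loop above is a classic spot for Python's late-binding closure pitfall: without binding the loop variables as default arguments, every lambda would see only the last `action`/`vessel` pair, which is why the connect call freezes them as defaults. A standalone illustration, independent of Qt:
```python
# Late vs. early binding for callbacks created in a loop; names are illustrative.
handlers_late = [lambda: name for name in ["a", "b", "c"]]
handlers_early = [lambda name=name: name for name in ["a", "b", "c"]]

print([h() for h in handlers_late])   # ['c', 'c', 'c'] -- all share the last value
print([h() for h in handlers_early])  # ['a', 'b', 'c'] -- defaults freeze each value
```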
{
"source": "4sarathbabu/blazar",
"score": 2
} |
#### File: v1/networks/service.py
```python
from blazar import context
from blazar.manager.service import get_plugins
from blazar import policy
from blazar.utils import trusts
class API(object):
def __init__(self):
self.plugin = get_plugins()["network"]
@policy.authorize('networks', 'get')
def get_networks(self):
"""List all existing networks."""
return self.plugin.list_networks()
@policy.authorize('networks', 'post')
@trusts.use_trust_auth()
def create_network(self, data):
"""Create new network.
:param data: New network characteristics.
:type data: dict
"""
return self.plugin.create_network(data)
@policy.authorize('networks', 'get')
def get_network(self, network_id):
"""Get network by its ID.
:param network_id: ID of the network in Blazar DB.
:type network_id: str
"""
return self.plugin.get_network(network_id)
@policy.authorize('networks', 'put')
def update_network(self, network_id, data):
"""Update network. Only name changing may be proceeded.
:param network_id: ID of the network in Blazar DB.
:type network_id: str
:param data: New network characteristics.
:type data: dict
"""
return self.plugin.update_network(network_id, data)
@policy.authorize('networks', 'delete')
def delete_network(self, network_id):
"""Delete specified network.
:param network_id: ID of the network in Blazar DB.
:type network_id: str
"""
self.plugin.delete_network(network_id)
@policy.authorize('networks', 'get_allocations')
def list_allocations(self, query):
"""List all allocations on all network segments.
:param query: parameter to query allocations
:type query: dict
"""
ctx = context.current()
detail = False
if policy.enforce(ctx, 'admin', {}, do_raise=False):
detail = True
return self.plugin.list_allocations(query, detail=detail)
@policy.authorize('networks', 'get_allocations')
def get_allocations(self, network_id, query):
"""List all allocations on a specificied network segment.
:param network_id: ID of the network segment in Blazar BDself.
:type network_id: str
:param query: parameters to query allocation
:type query: dict
"""
return self.plugin.get_allocations(network_id, query)
@policy.authorize('networks', 'get_resource_properties')
def list_resource_properties(self, query):
"""List resource properties for networks."""
return self.plugin.list_resource_properties(query)
@policy.authorize('networks', 'patch_resource_properties')
def update_resource_property(self, property_name, data):
"""Update a network resource property."""
return self.plugin.update_resource_property(property_name, data)
```
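Every method of the API class above is guarded by `policy.authorize(resource, action)` before the call reaches the plugin. A rough, hypothetical sketch of how such an enforcement decorator can be shaped (simplified; not Blazar's actual `blazar.policy` implementation):
```python
# Hypothetical, simplified policy decorator; the rule store and exception
# are stand-ins, not Blazar's real policy engine.
import functools

class PolicyNotAuthorized(Exception):
    pass

ALLOWED_RULES = {("networks", "get"), ("networks", "post")}

def authorize(resource, action):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if (resource, action) not in ALLOWED_RULES:
                raise PolicyNotAuthorized("%s:%s" % (resource, action))
            return func(*args, **kwargs)
        return wrapper
    return decorator

class NetworksAPI(object):
    @authorize('networks', 'get')
    def get_networks(self):
        return []

print(NetworksAPI().get_networks())  # [] -- rule present, call allowed
```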
#### File: alembic_migrations/versions/cffa05fd6878_add_soft_delete_to_most_blazar_models.py
```python
revision = '<KEY>'
down_revision = 'afd0a1c7748a'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('computehost_allocations',
sa.Column('deleted', sa.String(length=36), nullable=True))
op.add_column('computehost_allocations',
sa.Column('deleted_at', sa.DateTime(), nullable=True))
op.drop_constraint(u'computehost_allocations_ibfk_1',
'computehost_allocations', type_='foreignkey')
op.add_column('computehost_reservations',
sa.Column('deleted', sa.String(length=36), nullable=True))
op.add_column('computehost_reservations',
sa.Column('deleted_at', sa.DateTime(), nullable=True))
op.add_column('events',
sa.Column('deleted', sa.String(length=36), nullable=True))
op.add_column('events',
sa.Column('deleted_at', sa.DateTime(), nullable=True))
op.add_column('instance_reservations',
sa.Column('deleted', sa.String(length=36), nullable=True))
op.add_column('instance_reservations',
sa.Column('deleted_at', sa.DateTime(), nullable=True))
op.add_column('leases',
sa.Column('deleted', sa.String(length=36), nullable=True))
op.add_column('leases',
sa.Column('deleted_at', sa.DateTime(), nullable=True))
op.add_column('reservations',
sa.Column('deleted', sa.String(length=36), nullable=True))
op.add_column('reservations',
sa.Column('deleted_at', sa.DateTime(), nullable=True))
def downgrade():
op.drop_column('reservations', 'deleted_at')
op.drop_column('reservations', 'deleted')
op.drop_column('leases', 'deleted_at')
op.drop_column('leases', 'deleted')
op.drop_column('instance_reservations', 'deleted_at')
op.drop_column('instance_reservations', 'deleted')
op.drop_column('events', 'deleted_at')
op.drop_column('events', 'deleted')
op.drop_column('computehost_reservations', 'deleted_at')
op.drop_column('computehost_reservations', 'deleted')
op.create_foreign_key(u'computehost_allocations_ibfk_1',
'computehost_allocations', 'computehosts',
['compute_host_id'], ['id'])
op.drop_column('computehost_allocations', 'deleted_at')
op.drop_column('computehost_allocations', 'deleted')
```
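The migration above adds `deleted`/`deleted_at` columns so rows can be retired without being removed from the table. A minimal sketch of a model that consumes such columns via oslo.db's soft-delete mixin (the model is illustrative; Blazar's real models live in blazar/db/sqlalchemy/models.py):
```python
# Sketch of a soft-deletable model, assuming oslo.db and SQLAlchemy are installed.
# The model below is illustrative and not one of Blazar's actual models.
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from oslo_db.sqlalchemy import models as oslo_models

Base = declarative_base()

class Example(Base, oslo_models.SoftDeleteMixin, oslo_models.TimestampMixin):
    __tablename__ = 'example'
    # SoftDeleteMixin contributes 'deleted' and 'deleted_at'; Blazar's models
    # appear to redeclare 'deleted' as String(36), matching the migration above.
    id = sa.Column(sa.String(36), primary_key=True)
    name = sa.Column(sa.String(255))

engine = sa.create_engine('sqlite://')
Base.metadata.create_all(engine)  # table gets deleted/deleted_at from the mixin
```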
#### File: db/sqlalchemy/api.py
```python
import sys
from oslo_config import cfg
from blazar.db import exceptions as db_exc
from blazar.db.sqlalchemy import facade_wrapper
from blazar.db.sqlalchemy import models
from oslo_db import exception as common_db_exc
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
EXTRA_CAPABILITY_MODELS = {
'physical:host': models.ComputeHostExtraCapability,
'network': models.NetworkSegmentExtraCapability,
'device': models.DeviceExtraCapability,
}
LOG = logging.getLogger(__name__)
get_engine = facade_wrapper.get_engine
get_session = facade_wrapper.get_session
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def _read_deleted_filter(query, db_model, deleted):
if 'deleted' not in db_model.__table__.columns:
return query
default_deleted_value = None
if not deleted:
query = query.filter(db_model.deleted == default_deleted_value)
return query
def model_query(model, session=None, deleted=False):
"""Query helper.
:param model: base model to query
"""
session = session or get_session()
return _read_deleted_filter(session.query(model), model, deleted)
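# Example (assumed usage): model_query(models.Lease) returns only rows whose
# 'deleted' column is still NULL, i.e. not soft-deleted; passing deleted=True
# skips that filter, and models without a 'deleted' column are returned
# unfiltered by _read_deleted_filter above.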
def setup_db():
try:
engine = db_session.EngineFacade(cfg.CONF.database.connection,
sqlite_fk=True).get_engine()
models.Lease.metadata.create_all(engine)
except sa.exc.OperationalError as e:
LOG.error("Database registration exception: %s", e)
return False
return True
def drop_db():
try:
engine = db_session.EngineFacade(cfg.CONF.database.connection,
sqlite_fk=True).get_engine()
models.Lease.metadata.drop_all(engine)
except Exception as e:
LOG.error("Database shutdown exception: %s", e)
return False
return True
# Helpers for building constraints / equality checks
def constraint(**conditions):
return Constraint(conditions)
def equal_any(*values):
return EqualityCondition(values)
def not_equal(*values):
return InequalityCondition(values)
class Constraint(object):
def __init__(self, conditions):
self.conditions = conditions
def apply(self, model, query):
for key, condition in self.conditions.items():
for clause in condition.clauses(getattr(model, key)):
query = query.filter(clause)
return query
class EqualityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
return sa.or_([field == value for value in self.values])
class InequalityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
return [field != value for value in self.values]
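# Example (hypothetical values): the helpers above compose into
#   constraint(status=equal_any('PENDING', 'ACTIVE'), id=not_equal('42'))
# whose apply(model, query) adds the matching OR / != clauses to the query.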
# Reservation
def _reservation_get(session, reservation_id):
query = model_query(models.Reservation, session)
return query.filter_by(id=reservation_id).first()
def reservation_get(reservation_id):
return _reservation_get(get_session(), reservation_id)
def reservation_get_all():
query = model_query(models.Reservation, get_session())
return query.all()
def reservation_get_all_by_lease_id(lease_id):
reservations = (model_query(models.Reservation,
get_session()).filter_by(lease_id=lease_id))
return reservations.all()
def reservation_get_all_by_values(**kwargs):
"""Returns all entries filtered by col=value."""
reservation_query = model_query(models.Reservation, get_session())
for name, value in kwargs.items():
column = getattr(models.Reservation, name, None)
if column:
reservation_query = reservation_query.filter(column == value)
return reservation_query.all()
def reservation_create(values):
values = values.copy()
reservation = models.Reservation()
reservation.update(values)
session = get_session()
with session.begin():
try:
reservation.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=reservation.__class__.__name__, columns=e.columns)
return reservation_get(reservation.id)
def reservation_update(reservation_id, values):
session = get_session()
with session.begin():
reservation = _reservation_get(session, reservation_id)
reservation.update(values)
reservation.save(session=session)
return reservation_get(reservation_id)
def _reservation_destroy(session, reservation):
if reservation.instance_reservation:
reservation.instance_reservation.soft_delete(session=session)
if reservation.computehost_reservation:
reservation.computehost_reservation.soft_delete(session=session)
if reservation.network_reservation:
reservation.network_reservation.soft_delete(session=session)
if reservation.floatingip_reservation:
reservation.floatingip_reservation.soft_delete(session=session)
if reservation.computehost_allocations:
for computehost_allocation in reservation.computehost_allocations:
computehost_allocation.soft_delete(session=session)
if reservation.network_allocations:
for network_allocation in reservation.network_allocations:
network_allocation.soft_delete(session=session)
if reservation.floatingip_allocations:
for floatingip_allocation in reservation.floatingip_allocations:
floatingip_allocation.soft_delete(session=session)
reservation.soft_delete(session=session)
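# Note: _reservation_destroy soft-deletes the per-type reservation rows and
# every allocation attached to the reservation before soft-deleting the
# reservation row itself, so no child row outlives its parent.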
def reservation_destroy(reservation_id):
session = get_session()
with session.begin():
reservation = _reservation_get(session, reservation_id)
if not reservation:
# raise not found error
raise db_exc.BlazarDBNotFound(id=reservation_id,
model='Reservation')
_reservation_destroy(session, reservation)
# Lease
def _lease_get(session, lease_id):
query = model_query(models.Lease, session)
return query.filter_by(id=lease_id).first()
def lease_get(lease_id):
return _lease_get(get_session(), lease_id)
def lease_get_all():
query = model_query(models.Lease, get_session())
return query.all()
def lease_get_all_by_project(project_id):
raise NotImplementedError
def lease_get_all_by_user(user_id):
raise NotImplementedError
def lease_list(project_id=None):
query = model_query(models.Lease, get_session())
if project_id is not None:
query = query.filter_by(project_id=project_id)
return query.all()
def lease_create(values):
values = values.copy()
lease = models.Lease()
reservations = values.pop("reservations", [])
events = values.pop("events", [])
lease.update(values)
session = get_session()
with session.begin():
try:
lease.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=lease.__class__.__name__, columns=e.columns)
try:
for r in reservations:
reservation = models.Reservation()
reservation.update({"lease_id": lease.id})
reservation.update(r)
reservation.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=reservation.__class__.__name__, columns=e.columns)
try:
for e in events:
event = models.Event()
event.update({"lease_id": lease.id})
event.update(e)
event.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=event.__class__.__name__, columns=e.columns)
return lease_get(lease.id)
def lease_update(lease_id, values):
session = get_session()
with session.begin():
lease = _lease_get(session, lease_id)
lease.update(values)
lease.save(session=session)
return lease_get(lease_id)
def lease_destroy(lease_id):
session = get_session()
with session.begin():
lease = _lease_get(session, lease_id)
if not lease:
# raise not found error
raise db_exc.BlazarDBNotFound(id=lease_id, model='Lease')
for reservation in lease.reservations:
_reservation_destroy(session, reservation)
for event in lease.events:
event.soft_delete(session=session)
lease.soft_delete(session=session)
# Event
def _event_get(session, event_id, deleted=False):
query = model_query(models.Event, session, deleted=deleted)
return query.filter_by(id=event_id).first()
def _event_get_all(session):
query = model_query(models.Event, session)
return query
def event_get(event_id):
return _event_get(get_session(), event_id)
def event_get_all():
return _event_get_all(get_session()).all()
def _event_get_sorted_by_filters(sort_key, sort_dir, filters):
"""Return an event query filtered and sorted by name of the field."""
sort_fn = {'desc': desc, 'asc': asc}
events_query = _event_get_all(get_session())
if 'status' in filters:
events_query = (
events_query.filter(models.Event.status == filters['status']))
if 'lease_id' in filters:
events_query = (
events_query.filter(models.Event.lease_id == filters['lease_id']))
if 'event_type' in filters:
events_query = events_query.filter(models.Event.event_type ==
filters['event_type'])
if 'time' in filters:
border = filters['time']['border']
if filters['time']['op'] == 'lt':
events_query = events_query.filter(models.Event.time < border)
elif filters['time']['op'] == 'le':
events_query = events_query.filter(models.Event.time <= border)
elif filters['time']['op'] == 'gt':
events_query = events_query.filter(models.Event.time > border)
elif filters['time']['op'] == 'ge':
events_query = events_query.filter(models.Event.time >= border)
elif filters['time']['op'] == 'eq':
events_query = events_query.filter(models.Event.time == border)
events_query = events_query.order_by(
sort_fn[sort_dir](getattr(models.Event, sort_key))
)
return events_query
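# Example (illustrative values): filters such as
#   {'status': 'UNDONE', 'event_type': 'start_lease',
#    'time': {'border': some_datetime, 'op': 'le'}}
# narrow the query, and sort_key/sort_dir (e.g. 'time', 'asc') order the result.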
def event_get_first_sorted_by_filters(sort_key, sort_dir, filters):
"""Return first result for events
Return the first result for all events matching the filters
and sorted by name of the field.
"""
return _event_get_sorted_by_filters(sort_key, sort_dir, filters).first()
def event_get_all_sorted_by_filters(sort_key, sort_dir, filters):
"""Return events filtered and sorted by name of the field."""
return _event_get_sorted_by_filters(sort_key, sort_dir, filters).all()
def event_create(values):
values = values.copy()
event = models.Event()
event.update(values)
session = get_session()
with session.begin():
try:
event.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=event.__class__.__name__, columns=e.columns)
return event_get(event.id)
def event_update(event_id, values):
session = get_session()
with session.begin():
# NOTE(jason): Allow updating soft-deleted events
event = _event_get(session, event_id, deleted=True)
event.update(values)
event.save(session=session)
return event_get(event_id)
def event_destroy(event_id):
session = get_session()
with session.begin():
event = _event_get(session, event_id)
if not event:
# raise not found error
raise db_exc.BlazarDBNotFound(id=event_id, model='Event')
event.soft_delete(session=session)
# ComputeHostReservation
def _host_reservation_get(session, host_reservation_id):
query = model_query(models.ComputeHostReservation, session)
return query.filter_by(id=host_reservation_id).first()
def host_reservation_get(host_reservation_id):
return _host_reservation_get(get_session(),
host_reservation_id)
def host_reservation_get_all():
query = model_query(models.ComputeHostReservation, get_session())
return query.all()
def _host_reservation_get_by_reservation_id(session, reservation_id):
query = model_query(models.ComputeHostReservation, session)
return query.filter_by(reservation_id=reservation_id).first()
def host_reservation_get_by_reservation_id(reservation_id):
return _host_reservation_get_by_reservation_id(get_session(),
reservation_id)
def host_reservation_create(values):
values = values.copy()
host_reservation = models.ComputeHostReservation()
host_reservation.update(values)
session = get_session()
with session.begin():
try:
host_reservation.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=host_reservation.__class__.__name__, columns=e.columns)
return host_reservation_get(host_reservation.id)
def host_reservation_update(host_reservation_id, values):
session = get_session()
with session.begin():
host_reservation = _host_reservation_get(session,
host_reservation_id)
host_reservation.update(values)
host_reservation.save(session=session)
return host_reservation_get(host_reservation_id)
def host_reservation_destroy(host_reservation_id):
session = get_session()
with session.begin():
host_reservation = _host_reservation_get(session,
host_reservation_id)
if not host_reservation:
# raise not found error
raise db_exc.BlazarDBNotFound(
id=host_reservation_id, model='ComputeHostReservation')
host_reservation.soft_delete(session=session)
# InstanceReservation
def instance_reservation_create(values):
value = values.copy()
instance_reservation = models.InstanceReservations()
instance_reservation.update(value)
session = get_session()
with session.begin():
try:
instance_reservation.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=instance_reservation.__class__.__name__,
columns=e.columns)
return instance_reservation_get(instance_reservation.id)
def instance_reservation_get(instance_reservation_id, session=None):
if not session:
session = get_session()
query = model_query(models.InstanceReservations, session)
return query.filter_by(id=instance_reservation_id).first()
def instance_reservation_update(instance_reservation_id, values):
session = get_session()
with session.begin():
instance_reservation = instance_reservation_get(
instance_reservation_id, session)
if not instance_reservation:
raise db_exc.BlazarDBNotFound(
id=instance_reservation_id, model='InstanceReservations')
instance_reservation.update(values)
instance_reservation.save(session=session)
return instance_reservation_get(instance_reservation_id)
def instance_reservation_destroy(instance_reservation_id):
session = get_session()
with session.begin():
instance = instance_reservation_get(instance_reservation_id)
if not instance:
raise db_exc.BlazarDBNotFound(
id=instance_reservation_id, model='InstanceReservations')
instance.soft_delete(session=session)
# ComputeHostAllocation
def _host_allocation_get(session, host_allocation_id):
query = model_query(models.ComputeHostAllocation, session)
return query.filter_by(id=host_allocation_id).first()
def host_allocation_get(host_allocation_id):
return _host_allocation_get(get_session(),
host_allocation_id)
def host_allocation_get_all():
query = model_query(models.ComputeHostAllocation, get_session())
return query.all()
def host_allocation_get_all_by_values(**kwargs):
"""Returns all entries filtered by col=value."""
allocation_query = model_query(models.ComputeHostAllocation, get_session())
for name, value in kwargs.items():
column = getattr(models.ComputeHostAllocation, name, None)
if column:
allocation_query = allocation_query.filter(column == value)
return allocation_query.all()
def host_allocation_create(values):
values = values.copy()
host_allocation = models.ComputeHostAllocation()
host_allocation.update(values)
session = get_session()
with session.begin():
try:
host_allocation.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=host_allocation.__class__.__name__, columns=e.columns)
return host_allocation_get(host_allocation.id)
def host_allocation_update(host_allocation_id, values):
session = get_session()
with session.begin():
host_allocation = _host_allocation_get(session,
host_allocation_id)
host_allocation.update(values)
host_allocation.save(session=session)
return host_allocation_get(host_allocation_id)
def host_allocation_destroy(host_allocation_id):
session = get_session()
with session.begin():
host_allocation = _host_allocation_get(session,
host_allocation_id)
if not host_allocation:
# raise not found error
raise db_exc.BlazarDBNotFound(
id=host_allocation_id, model='ComputeHostAllocation')
host_allocation.soft_delete(session=session)
# ComputeHost
def _host_get(session, host_id):
query = model_query(models.ComputeHost, session)
return query.filter_by(id=host_id).first()
def _host_get_all(session):
query = model_query(models.ComputeHost, session)
return query
def host_get(host_id):
return _host_get(get_session(), host_id)
def host_list():
return model_query(models.ComputeHost, get_session()).all()
def host_get_all_by_filters(filters):
"""Returns hosts filtered by name of the field."""
hosts_query = _host_get_all(get_session())
if 'status' in filters:
hosts_query = hosts_query.filter(
models.ComputeHost.status == filters['status'])
return hosts_query.all()
def host_get_all_by_queries(queries):
"""Returns hosts filtered by an array of queries.
:param queries: array of queries "key op value" where op can be
http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
#sqlalchemy.sql.operators.ColumnOperators
"""
hosts_query = model_query(models.ComputeHost, get_session())
oper = {
'<': ['lt', lambda a, b: a >= b],
'>': ['gt', lambda a, b: a <= b],
'<=': ['le', lambda a, b: a > b],
'>=': ['ge', lambda a, b: a < b],
'==': ['eq', lambda a, b: a != b],
'!=': ['ne', lambda a, b: a == b],
}
hosts = []
for query in queries:
try:
key, op, value = query.split(' ', 2)
except ValueError:
raise db_exc.BlazarDBInvalidFilter(query_filter=query)
column = getattr(models.ComputeHost, key, None)
if column is not None:
if op == 'in':
filt = column.in_(value.split(','))
else:
if op in oper:
op = oper[op][0]
try:
attr = [e for e in ['%s', '%s_', '__%s__']
if hasattr(column, e % op)][0] % op
except IndexError:
raise db_exc.BlazarDBInvalidFilterOperator(
filter_operator=op)
if value == 'null':
value = None
filt = getattr(column, attr)(value)
hosts_query = hosts_query.filter(filt)
else:
# looking for extra capabilities matches
extra_filter = (
_host_extra_capability_query(get_session())
.filter(models.ExtraCapability.capability_name == key)
).all()
if not extra_filter:
raise db_exc.BlazarDBNotFound(
id=key, model='ComputeHostExtraCapability')
for host, capability_name in extra_filter:
if op in oper and oper[op][1](host.capability_value, value):
hosts.append(host.computehost_id)
elif op not in oper:
msg = 'Operator %s for extra capabilities not implemented'
raise NotImplementedError(msg % op)
# We must also avoid selecting any host which doesn't have the
# extra capability present.
all_hosts = [h.id for h in hosts_query.all()]
extra_filter_hosts = [h.computehost_id for h, _ in extra_filter]
hosts += [h for h in all_hosts if h not in extra_filter_hosts]
return hosts_query.filter(~models.ComputeHost.id.in_(hosts)).all()
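# Example (illustrative query strings):
#   host_get_all_by_queries(['memory_mb >= 4096',
#                            'hypervisor_type in QEMU,VMware',
#                            'gpu.model == v100'])
# Keys that are not ComputeHost columns fall through to the extra-capability
# branch above; hosts whose capability value fails the comparison, or that
# lack the capability entirely, are collected in 'hosts' and excluded by the
# final ~in_() filter.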
def reservable_host_get_all_by_queries(queries):
"""Returns reservable hosts filtered by an array of queries.
:param queries: array of queries "key op value" where op can be
http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
#sqlalchemy.sql.operators.ColumnOperators
"""
queries.append('reservable == 1')
return host_get_all_by_queries(queries)
def unreservable_host_get_all_by_queries(queries):
"""Returns unreservable hosts filtered by an array of queries.
:param queries: array of queries "key op value" where op can be
http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
#sqlalchemy.sql.operators.ColumnOperators
"""
# TODO(hiro-kobayashi): support the expression 'reservable == False'
queries.append('reservable == 0')
return host_get_all_by_queries(queries)
def host_create(values):
values = values.copy()
host = models.ComputeHost()
host.update(values)
session = get_session()
with session.begin():
try:
host.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=host.__class__.__name__, columns=e.columns)
return host_get(host.id)
def host_update(host_id, values):
session = get_session()
with session.begin():
host = _host_get(session, host_id)
host.update(values)
host.save(session=session)
return host_get(host_id)
def host_destroy(host_id):
session = get_session()
with session.begin():
host = _host_get(session, host_id)
if not host:
# raise not found error
raise db_exc.BlazarDBNotFound(id=host_id, model='Host')
session.delete(host)
# ComputeHostExtraCapability
def _host_extra_capability_query(session):
return (
model_query(models.ComputeHostExtraCapability, session)
.join(models.ExtraCapability)
.add_column(models.ExtraCapability.capability_name))
def _host_extra_capability_get(session, host_extra_capability_id):
query = _host_extra_capability_query(session).filter(
models.ComputeHostExtraCapability.id == host_extra_capability_id)
return query.first()
def host_extra_capability_get(host_extra_capability_id):
return _host_extra_capability_get(get_session(),
host_extra_capability_id)
def _host_extra_capability_get_all_per_host(session, host_id):
query = _host_extra_capability_query(session).filter(
models.ComputeHostExtraCapability.computehost_id == host_id)
return query
def host_extra_capability_get_all_per_host(host_id):
return _host_extra_capability_get_all_per_host(get_session(),
host_id).all()
def host_extra_capability_create(values):
values = values.copy()
resource_property = resource_property_get_or_create(
'physical:host', values.get('capability_name'))
del values['capability_name']
values['capability_id'] = resource_property.id
host_extra_capability = models.ComputeHostExtraCapability()
host_extra_capability.update(values)
session = get_session()
with session.begin():
try:
host_extra_capability.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=host_extra_capability.__class__.__name__,
columns=e.columns)
return host_extra_capability_get(host_extra_capability.id)
def host_extra_capability_update(host_extra_capability_id, values):
session = get_session()
with session.begin():
host_extra_capability, _ = (
_host_extra_capability_get(session,
host_extra_capability_id))
host_extra_capability.update(values)
host_extra_capability.save(session=session)
return host_extra_capability_get(host_extra_capability_id)
def host_extra_capability_destroy(host_extra_capability_id):
session = get_session()
with session.begin():
host_extra_capability = _host_extra_capability_get(
session, host_extra_capability_id)
if not host_extra_capability:
# raise not found error
raise db_exc.BlazarDBNotFound(
id=host_extra_capability_id,
model='ComputeHostExtraCapability')
session.delete(host_extra_capability[0])
def host_extra_capability_get_all_per_name(host_id, capability_name):
session = get_session()
with session.begin():
query = _host_extra_capability_get_all_per_host(session, host_id)
return query.filter(
models.ExtraCapability.capability_name == capability_name).all()
# FloatingIP reservation
def fip_reservation_create(fip_reservation_values):
values = fip_reservation_values.copy()
fip_reservation = models.FloatingIPReservation()
fip_reservation.update(values)
session = get_session()
with session.begin():
try:
fip_reservation.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=fip_reservation.__class__.__name__, columns=e.columns)
return fip_reservation_get(fip_reservation.id)
def _fip_reservation_get(session, fip_reservation_id):
query = model_query(models.FloatingIPReservation, session)
return query.filter_by(id=fip_reservation_id).first()
def fip_reservation_get(fip_reservation_id):
return _fip_reservation_get(get_session(), fip_reservation_id)
def fip_reservation_update(fip_reservation_id, fip_reservation_values):
session = get_session()
with session.begin():
fip_reservation = _fip_reservation_get(session, fip_reservation_id)
fip_reservation.update(fip_reservation_values)
fip_reservation.save(session=session)
return fip_reservation_get(fip_reservation_id)
def fip_reservation_destroy(fip_reservation_id):
session = get_session()
with session.begin():
fip_reservation = _fip_reservation_get(session, fip_reservation_id)
if not fip_reservation:
# raise not found error
raise db_exc.BlazarDBNotFound(
id=fip_reservation_id, model='FloatingIPReservation')
fip_reservation.soft_delete(session=session)
session.delete(fip_reservation)
# Required FIP
def required_fip_create(required_fip_values):
values = required_fip_values.copy()
required_fip = models.RequiredFloatingIP()
required_fip.update(values)
session = get_session()
with session.begin():
try:
required_fip.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=required_fip.__class__.__name__, columns=e.columns)
return required_fip_get(required_fip.id)
def _required_fip_get(session, required_fip_id):
query = model_query(models.RequiredFloatingIP, session)
return query.filter_by(id=required_fip_id).first()
def required_fip_get(required_fip_id):
return _required_fip_get(get_session(), required_fip_id)
def required_fip_update(required_fip_id, required_fip_values):
session = get_session()
with session.begin():
required_fip = _required_fip_get(session, required_fip_id)
required_fip.update(required_fip_values)
required_fip.save(session=session)
return required_fip_get(required_fip_id)
def required_fip_destroy(required_fip_id):
session = get_session()
with session.begin():
required_fip = _required_fip_get(session, required_fip_id)
if not required_fip:
# raise not found error
raise db_exc.BlazarDBNotFound(
id=required_fip_id, model='RequiredFloatingIP')
required_fip.soft_delete(session=session)
session.delete(required_fip)
def required_fip_destroy_by_fip_reservation_id(fip_reservation_id):
session = get_session()
with session.begin():
required_fips = model_query(
models.RequiredFloatingIP, session).filter_by(
floatingip_reservation_id=fip_reservation_id)
for required_fip in required_fips:
required_fip_destroy(required_fip['id'])
# FloatingIP Allocation
def _fip_allocation_get(session, fip_allocation_id):
query = model_query(models.FloatingIPAllocation, session)
return query.filter_by(id=fip_allocation_id).first()
def fip_allocation_get(fip_allocation_id):
return _fip_allocation_get(get_session(), fip_allocation_id)
def fip_allocation_create(allocation_values):
values = allocation_values.copy()
fip_allocation = models.FloatingIPAllocation()
fip_allocation.update(values)
session = get_session()
with session.begin():
try:
fip_allocation.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=fip_allocation.__class__.__name__, columns=e.columns)
return fip_allocation_get(fip_allocation.id)
def fip_allocation_get_all_by_values(**kwargs):
"""Returns all entries filtered by col=value."""
allocation_query = model_query(models.FloatingIPAllocation, get_session())
for name, value in kwargs.items():
column = getattr(models.FloatingIPAllocation, name, None)
if column:
allocation_query = allocation_query.filter(column == value)
return allocation_query.all()
def fip_allocation_destroy(allocation_id):
session = get_session()
with session.begin():
fip_allocation = _fip_allocation_get(session, allocation_id)
if not fip_allocation:
# raise not found error
raise db_exc.BlazarDBNotFound(
id=allocation_id, model='FloatingIPAllocation')
fip_allocation.soft_delete(session=session)
session.delete(fip_allocation)
def fip_allocation_update(allocation_id, allocation_values):
session = get_session()
with session.begin():
fip_allocation = _fip_allocation_get(session, allocation_id)
fip_allocation.update(allocation_values)
fip_allocation.save(session=session)
return fip_allocation_get(allocation_id)
# Floating IP
def _floatingip_get(session, floatingip_id):
query = model_query(models.FloatingIP, session)
return query.filter_by(id=floatingip_id).first()
def _floatingip_get_all(session):
query = model_query(models.FloatingIP, session)
return query
def fip_get_all_by_queries(queries):
"""Returns Floating IPs filtered by an array of queries.
:param queries: array of queries "key op value" where op can be
http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
#sqlalchemy.sql.operators.ColumnOperators
"""
fips_query = model_query(models.FloatingIP, get_session())
oper = {
'<': ['lt', lambda a, b: a >= b],
'>': ['gt', lambda a, b: a <= b],
'<=': ['le', lambda a, b: a > b],
'>=': ['ge', lambda a, b: a < b],
'==': ['eq', lambda a, b: a != b],
'!=': ['ne', lambda a, b: a == b],
}
for query in queries:
try:
key, op, value = query.split(' ', 2)
except ValueError:
raise db_exc.BlazarDBInvalidFilter(query_filter=query)
column = getattr(models.FloatingIP, key, None)
if column is not None:
if op == 'in':
filt = column.in_(value.split(','))
else:
if op in oper:
op = oper[op][0]
try:
attr = [e for e in ['%s', '%s_', '__%s__']
if hasattr(column, e % op)][0] % op
except IndexError:
raise db_exc.BlazarDBInvalidFilterOperator(
filter_operator=op)
if value == 'null':
value = None
filt = getattr(column, attr)(value)
fips_query = fips_query.filter(filt)
else:
raise db_exc.BlazarDBInvalidFilter(query_filter=query)
return fips_query.all()
def reservable_fip_get_all_by_queries(queries):
"""Returns reservable fips filtered by an array of queries.
:param queries: array of queries "key op value" where op can be
http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
#sqlalchemy.sql.operators.ColumnOperators
"""
queries.append('reservable == 1')
return fip_get_all_by_queries(queries)
def floatingip_get(floatingip_id):
return _floatingip_get(get_session(), floatingip_id)
def floatingip_list():
return model_query(models.FloatingIP, get_session()).all()
def floatingip_create(values):
values = values.copy()
floatingip = models.FloatingIP()
floatingip.update(values)
session = get_session()
with session.begin():
try:
floatingip.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=floatingip.__class__.__name__, columns=e.columns)
return floatingip_get(floatingip.id)
def floatingip_destroy(floatingip_id):
session = get_session()
with session.begin():
floatingip = _floatingip_get(session, floatingip_id)
if not floatingip:
# raise not found error
raise db_exc.BlazarDBNotFound(id=floatingip_id, model='FloatingIP')
session.delete(floatingip)
# Networks
def _network_get(session, network_id):
query = model_query(models.NetworkSegment, session)
return query.filter_by(id=network_id).first()
def _network_get_all(session):
query = model_query(models.NetworkSegment, session)
return query
def network_get(network_id):
return _network_get(get_session(), network_id)
def network_list():
return model_query(models.NetworkSegment, get_session()).all()
def network_create(values):
values = values.copy()
network = models.NetworkSegment()
network.update(values)
session = get_session()
with session.begin():
try:
network.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=network.__class__.__name__, columns=e.columns)
return network_get(network.id)
def network_update(network_id, values):
session = get_session()
with session.begin():
network = _network_get(session, network_id)
network.update(values)
network.save(session=session)
return network_get(network_id)
def network_destroy(network_id):
session = get_session()
with session.begin():
network = _network_get(session, network_id)
if not network:
# raise not found error
raise db_exc.BlazarDBNotFound(
id=network_id, model='Network segment')
session.delete(network)
# NetworkAllocation
def _network_allocation_get(session, network_allocation_id):
query = model_query(models.NetworkAllocation, session)
return query.filter_by(id=network_allocation_id).first()
def network_allocation_get(network_allocation_id):
return _network_allocation_get(get_session(),
network_allocation_id)
def network_allocation_get_all():
query = model_query(models.NetworkAllocation, get_session())
return query.all()
def network_allocation_create(values):
values = values.copy()
network_allocation = models.NetworkAllocation()
network_allocation.update(values)
session = get_session()
with session.begin():
try:
network_allocation.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=network_allocation.__class__.__name__, columns=e.columns)
return network_allocation_get(network_allocation.id)
def network_allocation_get_all_by_values(**kwargs):
"""Returns all entries filtered by col=value."""
allocation_query = model_query(models.NetworkAllocation, get_session())
for name, value in kwargs.items():
column = getattr(models.NetworkAllocation, name, None)
if column:
allocation_query = allocation_query.filter(column == value)
return allocation_query.all()
def network_allocation_destroy(network_allocation_id):
session = get_session()
with session.begin():
network_allocation = _network_allocation_get(session,
network_allocation_id)
if not network_allocation:
# raise not found error
raise db_exc.BlazarDBNotFound(
id=network_allocation_id, model='NetworkAllocation')
network_allocation.soft_delete(session=session)
# NetworkReservation
def network_reservation_create(values):
value = values.copy()
network_reservation = models.NetworkReservation()
network_reservation.update(value)
session = get_session()
with session.begin():
try:
network_reservation.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=network_reservation.__class__.__name__,
columns=e.columns)
return network_reservation_get(network_reservation.id)
def network_reservation_get(network_reservation_id, session=None):
if not session:
session = get_session()
query = model_query(models.NetworkReservation, session)
return query.filter_by(id=network_reservation_id).first()
def network_reservation_update(network_reservation_id, values):
session = get_session()
with session.begin():
network_reservation = network_reservation_get(
network_reservation_id, session)
if not network_reservation:
raise db_exc.BlazarDBNotFound(
id=network_reservation_id, model='NetworkReservation')
network_reservation.update(values)
network_reservation.save(session=session)
return network_reservation_get(network_reservation_id)
def network_reservation_destroy(network_reservation_id):
session = get_session()
with session.begin():
network = network_reservation_get(network_reservation_id)
if not network:
raise db_exc.BlazarDBNotFound(
id=network_reservation_id, model='NetworkReservation')
network.soft_delete(session=session)
def network_get_all_by_filters(filters):
"""Returns networks filtered by name of the field."""
networks_query = _network_get_all(get_session())
if 'status' in filters:
networks_query = networks_query.filter(
models.NetworkSegment.status == filters['status'])
return networks_query.all()
def network_get_all_by_queries(queries):
"""Return networks filtered by an array of queries.
:param queries: array of queries "key op value" where op can be
http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
#sqlalchemy.sql.operators.ColumnOperators
"""
networks_query = model_query(models.NetworkSegment, get_session())
oper = {
'<': ['lt', lambda a, b: a >= b],
'>': ['gt', lambda a, b: a <= b],
'<=': ['le', lambda a, b: a > b],
'>=': ['ge', lambda a, b: a < b],
'==': ['eq', lambda a, b: a != b],
'!=': ['ne', lambda a, b: a == b],
}
networks = []
for query in queries:
try:
key, op, value = query.split(' ', 2)
except ValueError:
raise db_exc.BlazarDBInvalidFilter(query_filter=query)
column = getattr(models.NetworkSegment, key, None)
if column is not None:
if op == 'in':
filt = column.in_(value.split(','))
else:
if op in oper:
op = oper[op][0]
try:
attr = [e for e in ['%s', '%s_', '__%s__']
if hasattr(column, e % op)][0] % op
except IndexError:
raise db_exc.BlazarDBInvalidFilterOperator(
filter_operator=op)
if value == 'null':
value = None
filt = getattr(column, attr)(value)
networks_query = networks_query.filter(filt)
else:
# looking for extra capabilities matches
extra_filter = (
_network_extra_capability_query(get_session())
.filter(models.ExtraCapability.capability_name == key)
).all()
if not extra_filter:
raise db_exc.BlazarDBNotFound(
id=key, model='NetworkSegmentExtraCapability')
for network, capability_name in extra_filter:
if op in oper and oper[op][1](network.capability_value, value):
networks.append(network.network_id)
elif op not in oper:
msg = 'Operator %s for extra capabilities not implemented'
raise NotImplementedError(msg % op)
# We must also avoid selecting any network which doesn't have the
# extra capability present.
all_networks = [h.id for h in networks_query.all()]
extra_filter_networks = [h.network_id for h, _ in extra_filter]
networks += [h for h in all_networks if h not in
extra_filter_networks]
return networks_query.filter(~models.NetworkSegment.id.in_(networks)).all()
def reservable_network_get_all_by_queries(queries):
"""Return reservable networks filtered by an array of queries.
:param queries: array of queries "key op value" where op can be
http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
#sqlalchemy.sql.operators.ColumnOperators
"""
queries.append('reservable == 1')
return network_get_all_by_queries(queries)
def unreservable_network_get_all_by_queries(queries):
"""Return unreservable networks filtered by an array of queries.
:param queries: array of queries "key op value" where op can be
http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
#sqlalchemy.sql.operators.ColumnOperators
"""
# TODO(hiro-kobayashi): support the expression 'reservable == False'
queries.append('reservable == 0')
return network_get_all_by_queries(queries)
# NetworkSegmentExtraCapability
def _network_extra_capability_query(session):
return (
model_query(models.NetworkSegmentExtraCapability, session)
.join(models.ExtraCapability)
.add_column(models.ExtraCapability.capability_name))
def _network_extra_capability_get(session, network_extra_capability_id):
query = _network_extra_capability_query(session).filter(
models.NetworkSegmentExtraCapability.id == network_extra_capability_id)
return query.first()
def network_extra_capability_get(network_extra_capability_id):
return _network_extra_capability_get(get_session(),
network_extra_capability_id)
def _network_extra_capability_get_all_per_network(session, network_id):
query = _network_extra_capability_query(session).filter(
models.NetworkSegmentExtraCapability.network_id == network_id)
return query
def network_extra_capability_get_all_per_network(network_id):
return _network_extra_capability_get_all_per_network(get_session(),
network_id).all()
def network_extra_capability_create(values):
values = values.copy()
resource_property = _resource_property_get_or_create(
get_session(), 'network', values.get('capability_name'))
del values['capability_name']
values['capability_id'] = resource_property.id
network_extra_capability = models.NetworkSegmentExtraCapability()
network_extra_capability.update(values)
session = get_session()
with session.begin():
try:
network_extra_capability.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=network_extra_capability.__class__.__name__,
columns=e.columns)
return network_extra_capability_get(network_extra_capability.id)
def network_extra_capability_update(network_extra_capability_id, values):
session = get_session()
with session.begin():
network_extra_capability, _ = (
_network_extra_capability_get(session,
network_extra_capability_id))
network_extra_capability.update(values)
network_extra_capability.save(session=session)
return network_extra_capability_get(network_extra_capability_id)
def network_extra_capability_destroy(network_extra_capability_id):
session = get_session()
with session.begin():
network_extra_capability = _network_extra_capability_get(
session, network_extra_capability_id)
if not network_extra_capability:
# raise not found error
raise db_exc.BlazarDBNotFound(
id=network_extra_capability_id,
model='NetworkSegmentExtraCapability')
session.delete(network_extra_capability[0])
def network_extra_capability_get_all_per_name(network_id, capability_name):
session = get_session()
with session.begin():
query = _network_extra_capability_get_all_per_network(
session, network_id)
return query.filter_by(capability_name=capability_name).all()
def network_extra_capability_get_latest_per_name(network_id, capability_name):
session = get_session()
with session.begin():
query = _network_extra_capability_get_all_per_network(session,
network_id)
return (
query
.filter(models.ExtraCapability.capability_name == capability_name)
.order_by(models.NetworkSegmentExtraCapability.created_at.desc())
.first())
# Devices
def _device_get(session, device_id):
query = model_query(models.Device, session)
return query.filter_by(id=device_id).first()
def _device_get_all(session):
query = model_query(models.Device, session)
return query
def device_get(device_id):
return _device_get(get_session(), device_id)
def device_list():
return model_query(models.Device, get_session()).all()
def device_create(values):
values = values.copy()
device = models.Device()
device.update(values)
session = get_session()
with session.begin():
try:
device.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=device.__class__.__name__, columns=e.columns)
return device_get(device.id)
def device_update(device_id, values):
session = get_session()
with session.begin():
device = _device_get(session, device_id)
device.update(values)
device.save(session=session)
return device_get(device_id)
def device_destroy(device_id):
session = get_session()
with session.begin():
device = _device_get(session, device_id)
if not device:
# raise not found error
raise db_exc.BlazarDBNotFound(
id=device_id, model='Device')
session.delete(device)
# DeviceAllocation
def _device_allocation_get(session, device_allocation_id):
query = model_query(models.DeviceAllocation, session)
return query.filter_by(id=device_allocation_id).first()
def device_allocation_get(device_allocation_id):
return _device_allocation_get(get_session(),
device_allocation_id)
def device_allocation_get_all():
query = model_query(models.DeviceAllocation, get_session())
return query.all()
def device_allocation_create(values):
values = values.copy()
device_allocation = models.DeviceAllocation()
device_allocation.update(values)
session = get_session()
with session.begin():
try:
device_allocation.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=device_allocation.__class__.__name__, columns=e.columns)
return device_allocation_get(device_allocation.id)
def device_allocation_get_all_by_values(**kwargs):
"""Returns all entries filtered by col=value."""
allocation_query = model_query(models.DeviceAllocation, get_session())
for name, value in kwargs.items():
column = getattr(models.DeviceAllocation, name, None)
if column:
allocation_query = allocation_query.filter(column == value)
return allocation_query.all()
def device_allocation_update(device_allocation_id, values):
session = get_session()
with session.begin():
device_allocation = _device_allocation_get(session,
device_allocation_id)
device_allocation.update(values)
device_allocation.save(session=session)
return device_allocation_get(device_allocation_id)
def device_allocation_destroy(device_allocation_id):
session = get_session()
with session.begin():
device_allocation = _device_allocation_get(session,
device_allocation_id)
if not device_allocation:
# raise not found error
raise db_exc.BlazarDBNotFound(
id=device_allocation_id, model='DeviceAllocation')
device_allocation.soft_delete(session=session)
# DeviceReservation
def device_reservation_create(values):
value = values.copy()
device_reservation = models.DeviceReservation()
device_reservation.update(value)
session = get_session()
with session.begin():
try:
device_reservation.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=device_reservation.__class__.__name__,
columns=e.columns)
return device_reservation_get(device_reservation.id)
def device_reservation_get(device_reservation_id, session=None):
if not session:
session = get_session()
query = model_query(models.DeviceReservation, session)
return query.filter_by(id=device_reservation_id).first()
def device_reservation_update(device_reservation_id, values):
session = get_session()
with session.begin():
device_reservation = device_reservation_get(
device_reservation_id, session)
if not device_reservation:
raise db_exc.BlazarDBNotFound(
id=device_reservation_id, model='DeviceReservation')
device_reservation.update(values)
device_reservation.save(session=session)
return device_reservation_get(device_reservation_id)
def device_reservation_destroy(device_reservation_id):
session = get_session()
with session.begin():
device = device_reservation_get(device_reservation_id)
if not device:
raise db_exc.BlazarDBNotFound(
id=device_reservation_id, model='DeviceReservation')
device.soft_delete(session=session)
def device_get_all_by_filters(filters):
"""Returns devices filtered by name of the field."""
devices_query = _device_get_all(get_session())
if 'status' in filters:
devices_query = devices_query.filter(
models.Device.status == filters['status'])
return devices_query.all()
def device_get_all_by_queries(queries):
"""Return devices filtered by an array of queries.
:param queries: array of queries "key op value" where op can be
http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
#sqlalchemy.sql.operators.ColumnOperators
"""
devices_query = model_query(models.Device, get_session())
oper = {
'<': ['lt', lambda a, b: a >= b],
'>': ['gt', lambda a, b: a <= b],
'<=': ['le', lambda a, b: a > b],
'>=': ['ge', lambda a, b: a < b],
'==': ['eq', lambda a, b: a != b],
'!=': ['ne', lambda a, b: a == b],
}
devices = []
for query in queries:
try:
key, op, value = query.split(' ', 2)
except ValueError:
raise db_exc.BlazarDBInvalidFilter(query_filter=query)
column = getattr(models.Device, key, None)
if column is not None:
if op == 'in':
filt = column.in_(value.split(','))
else:
if op in oper:
op = oper[op][0]
try:
attr = [e for e in ['%s', '%s_', '__%s__']
if hasattr(column, e % op)][0] % op
except IndexError:
raise db_exc.BlazarDBInvalidFilterOperator(
filter_operator=op)
if value == 'null':
value = None
filt = getattr(column, attr)(value)
devices_query = devices_query.filter(filt)
else:
# looking for extra capabilities matches
extra_filter = (
_device_extra_capability_query(get_session())
.filter(models.ExtraCapability.capability_name == key)
).all()
if not extra_filter:
raise db_exc.BlazarDBNotFound(
id=key, model='DeviceExtraCapability')
for device, capability_name in extra_filter:
if op in oper and oper[op][1](device.capability_value, value):
devices.append(device.device_id)
elif op not in oper:
msg = 'Operator %s for extra capabilities not implemented'
raise NotImplementedError(msg % op)
# We must also avoid selecting any device which doesn't have the
# extra capability present.
all_devices = [h.id for h in devices_query.all()]
extra_filter_devices = [h.device_id for h, _ in extra_filter]
devices += [h for h in all_devices if h not in
extra_filter_devices]
return devices_query.filter(~models.Device.id.in_(devices)).all()
def reservable_device_get_all_by_queries(queries):
"""Return reservable devices filtered by an array of queries.
:param queries: array of queries "key op value" where op can be
http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
#sqlalchemy.sql.operators.ColumnOperators
"""
queries.append('reservable == 1')
return device_get_all_by_queries(queries)
def unreservable_device_get_all_by_queries(queries):
"""Return unreservable devices filtered by an array of queries.
:param queries: array of queries "key op value" where op can be
http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
#sqlalchemy.sql.operators.ColumnOperators
"""
# TODO(hiro-kobayashi): support the expression 'reservable == False'
queries.append('reservable == 0')
return device_get_all_by_queries(queries)
# DeviceExtraCapability
def _device_extra_capability_query(session):
return (
model_query(models.DeviceExtraCapability, session)
.join(models.ExtraCapability)
.add_column(models.ExtraCapability.capability_name))
def _device_extra_capability_get(session, device_extra_capability_id):
query = _device_extra_capability_query(session).filter(
models.DeviceExtraCapability.id == device_extra_capability_id)
return query.first()
def device_extra_capability_get(device_extra_capability_id):
return _device_extra_capability_get(get_session(),
device_extra_capability_id)
def _device_extra_capability_get_all_per_device(session, device_id):
query = _device_extra_capability_query(session).filter(
models.DeviceExtraCapability.device_id == device_id)
return query
def device_extra_capability_get_all_per_device(device_id):
return _device_extra_capability_get_all_per_device(get_session(),
device_id).all()
def device_extra_capability_create(values):
values = values.copy()
resource_property = _resource_property_get_or_create(
get_session(), 'device', values.get('capability_name'))
del values['capability_name']
values['capability_id'] = resource_property.id
device_extra_capability = models.DeviceExtraCapability()
device_extra_capability.update(values)
session = get_session()
with session.begin():
try:
device_extra_capability.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=device_extra_capability.__class__.__name__,
columns=e.columns)
return device_extra_capability_get(device_extra_capability.id)
def device_extra_capability_update(device_extra_capability_id, values):
session = get_session()
with session.begin():
device_extra_capability, _ = (
_device_extra_capability_get(session,
device_extra_capability_id))
device_extra_capability.update(values)
device_extra_capability.save(session=session)
return device_extra_capability_get(device_extra_capability_id)
def device_extra_capability_destroy(device_extra_capability_id):
session = get_session()
with session.begin():
device_extra_capability = _device_extra_capability_get(
session, device_extra_capability_id)
if not device_extra_capability:
# raise not found error
raise db_exc.BlazarDBNotFound(
id=device_extra_capability_id,
model='DeviceExtraCapability')
session.delete(device_extra_capability[0])
def device_extra_capability_get_all_per_name(device_id, capability_name):
session = get_session()
with session.begin():
query = _device_extra_capability_get_all_per_device(
session, device_id)
return query.filter_by(capability_name=capability_name).all()
def device_extra_capability_get_latest_per_name(device_id, capability_name):
session = get_session()
with session.begin():
query = _device_extra_capability_get_all_per_device(session,
device_id)
return (
query
.filter(models.ExtraCapability.capability_name == capability_name)
.order_by(models.DeviceExtraCapability.created_at.desc())
.first())
# Resource Properties
def _resource_property_get(session, resource_type, capability_name):
query = (
model_query(models.ExtraCapability, session)
.filter_by(resource_type=resource_type)
.filter_by(capability_name=capability_name))
return query.first()
def resource_property_get(resource_type, capability_name):
return _resource_property_get(get_session(), resource_type,
capability_name)
def resource_properties_list(resource_type):
if resource_type not in EXTRA_CAPABILITY_MODELS:
raise db_exc.BlazarDBExtraCapabilitiesNotEnabled(
resource_type=resource_type)
session = get_session()
with session.begin():
resource_model = EXTRA_CAPABILITY_MODELS[resource_type]
query = session.query(
models.ExtraCapability.capability_name,
models.ExtraCapability.private,
resource_model.capability_value).join(resource_model).distinct()
return query.all()
def _resource_property_create(session, values):
values = values.copy()
resource_property = models.ExtraCapability()
resource_property.update(values)
with session.begin():
try:
resource_property.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=resource_property.__class__.__name__,
columns=e.columns)
return resource_property_get(values.get('resource_type'),
values.get('capability_name'))
def resource_property_create(values):
return _resource_property_create(get_session(), values)
def resource_property_update(resource_type, property_name, values):
if resource_type not in EXTRA_CAPABILITY_MODELS:
raise db_exc.BlazarDBExtraCapabilitiesNotEnabled(
resource_type=resource_type)
values = values.copy()
session = get_session()
with session.begin():
resource_property = _resource_property_get(
session, resource_type, property_name)
if not resource_property:
raise db_exc.BlazarDBInvalidExtraCapability(
property_name=property_name,
resource_type=resource_type)
resource_property.update(values)
resource_property.save(session=session)
return resource_property_get(resource_type, property_name)
def _resource_property_get_or_create(session, resource_type, capability_name):
resource_property = _resource_property_get(
session, resource_type, capability_name)
if resource_property:
return resource_property
else:
rp_values = {
'resource_type': resource_type,
'capability_name': capability_name}
return resource_property_create(rp_values)
def resource_property_get_or_create(resource_type, capability_name):
return _resource_property_get_or_create(
get_session(), resource_type, capability_name)
```
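
The extra-capability helpers above take plain dicts and resolve the underlying `ExtraCapability` row themselves. The sketch below shows how a caller might use them; the ids and capability values are placeholders, the import path is assumed, and a configured Blazar database session is required.

```python
from blazar.db import api as db_api  # path assumed; adjust to the real module

# Placeholder values; a real caller passes ids taken from the devices table.
values = {
    'device_id': 'c0ffee00-0000-4000-8000-000000000001',  # placeholder id
    'capability_name': 'gpu_model',                        # placeholder name
    'capability_value': 'a100',                            # placeholder value
}
# Creates the ('device', 'gpu_model') ExtraCapability row on first use, then
# stores the per-device value and returns the saved DeviceExtraCapability.
extra_cap = db_api.device_extra_capability_create(values)
# Most recent value recorded for this capability on this device.
latest = db_api.device_extra_capability_get_latest_per_name(
    values['device_id'], 'gpu_model')
```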
#### File: blazar/plugins/monitor.py
```python
import datetime
from oslo_config import cfg
import abc
from blazar.manager import exceptions as manager_ex
from blazar.plugins import base
from blazar import status
from oslo_log import log as logging
monitor_opts = [
cfg.BoolOpt('enable_notification_monitor',
default=False,
help='Enable notification-based resource monitoring. '
'If it is enabled, the blazar-manager monitors states of '
'resource by subscribing to notifications of '
'the corresponding service.'),
cfg.ListOpt('notification_topics',
default=['notifications', 'versioned_notifications'],
help='Notification topics to subscribe to.'),
cfg.BoolOpt('enable_polling_monitor',
default=False,
help='Enable polling-based resource monitoring. '
'If it is enabled, the blazar-manager monitors states '
'of resource by polling the service API.'),
cfg.IntOpt('polling_interval',
default=60,
min=1,
help='Interval (seconds) of polling for health checking.'),
cfg.IntOpt('healing_interval',
default=60,
min=0,
help='Interval (minutes) of reservation healing. '
'If 0 is specified, the interval is infinite and all the '
                     'reservations in the future are healed at one time.'),
]
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class GeneralMonitorPlugin(base.BaseMonitorPlugin, metaclass=abc.ABCMeta):
"""Monitor plugin for resource."""
# Singleton design pattern
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = \
super(GeneralMonitorPlugin, cls).__new__(cls)
cls._instance.resource_type = kwargs.get("resource_type")
super(GeneralMonitorPlugin, cls._instance).__init__()
return cls._instance
def __init__(self, *args, **kwargs):
"""Do nothing.
This class uses the Singleton design pattern and an instance of this
class is generated and initialized in __new__().
"""
pass
def register_healing_handler(self, handler):
self.heal_reservations = handler
def register_reallocater(self, reallocator):
self._reallocate = reallocator
@abc.abstractmethod
def filter_allocations(self, reservation, resource_ids):
"""Filter allocations of a reservation by resource ids
:param reservation: a reservation dict
:param resource_ids: a list of resource ids
:return: a list of allocations that contain resources
"""
pass
@abc.abstractmethod
def get_reservations_by_resource_ids(self, resource_ids,
interval_begin, interval_end):
"""Get reservations by resource ids.
        :param resource_ids: a list of resource ids.
:param interval_begin: start date of the searching period.
:param interval_end: end date of the searching period.
:return: a list of reservation dict
"""
pass
@abc.abstractmethod
def get_unreservable_resourses(self):
"""Get all unreservable resources
"""
pass
@abc.abstractmethod
def poll_resource_failures(self):
"""Get a list of failed resources and recovered resources
"""
pass
@abc.abstractmethod
def set_reservable(self, resource_id, is_reservable):
"""Set resource as reservable or not reservable
"""
pass
def heal_reservations(self, failed_resources, interval_begin,
interval_end):
"""Heal reservations which suffer from resource failures.
:param failed_resources: a list of failed resources.
:param interval_begin: start date of the period to heal.
:param interval_end: end date of the period to heal.
:return: a dictionary of {reservation id: flags to update}
e.g. {'de27786d-bd96-46bb-8363-19c13b2c6657':
{'missing_resources': True}}
"""
reservation_flags = {}
resource_ids = [h['id'] for h in failed_resources]
reservations = self.get_reservations_by_resource_ids(resource_ids,
interval_begin,
interval_end)
for reservation in reservations:
if reservation['resource_type'] != self.resource_type:
continue
reservation_id = reservation["id"]
for allocation in self.filter_allocations(reservation,
resource_ids):
try:
if self._reallocate(allocation):
if reservation['status'] == status.reservation.ACTIVE:
if reservation_id not in reservation_flags:
reservation_flags[reservation_id] = {}
reservation_flags[reservation_id].update(
{'resources_changed': True})
else:
if reservation_id not in reservation_flags:
reservation_flags[reservation_id] = {}
reservation_flags[reservation_id].update(
{'missing_resources': True})
except manager_ex.ResourceBusy:
LOG.info(
"Cannot heal reservation %s, found servers",
reservation["id"]
)
return reservation_flags
def is_notification_enabled(self):
"""Check if the notification monitor is enabled."""
return CONF[self.resource_type].enable_notification_monitor
def get_notification_topics(self):
"""Get topics of notification to subscribe to."""
return CONF[self.resource_type].notification_topics
def is_polling_enabled(self):
"""Check if the polling monitor is enabled."""
return CONF[self.resource_type].enable_polling_monitor
def get_polling_interval(self):
"""Get interval of polling."""
return CONF[self.resource_type].polling_interval
def poll(self):
"""Detect and handle resource failures.
:return: a dictionary of {reservation id: flags to update}
e.g. {'de27786d-bd96-46bb-8363-19c13b2c6657':
{'missing_resources': True}}
"""
LOG.trace('Poll...')
failed_resources, recovered_resources = self.poll_resource_failures()
if failed_resources:
for resource in failed_resources:
self.set_reservable(resource, False)
if recovered_resources:
for resource in recovered_resources:
self.set_reservable(resource, True)
return self.heal()
def get_healing_interval(self):
"""Get interval of reservation healing in minutes."""
return CONF[self.resource_type].healing_interval
def heal(self):
"""Heal suffering reservations in the next healing interval.
:return: a dictionary of {reservation id: flags to update}
"""
reservation_flags = {}
resources = self.get_unreservable_resourses()
interval_begin = datetime.datetime.utcnow()
interval = self.get_healing_interval()
if interval == 0:
interval_end = datetime.date.max
else:
interval_end = interval_begin + datetime.timedelta(
minutes=interval)
reservation_flags.update(self.heal_reservations(resources,
interval_begin,
interval_end))
return reservation_flags
```
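
`GeneralMonitorPlugin` leaves all resource-specific behaviour to subclasses. Below is a minimal, hedged sketch of a concrete plugin: the resource type, the stubbed return values, and the `allocations` key are placeholders rather than part of Blazar itself.

```python
from blazar.plugins import monitor


class ExampleMonitorPlugin(monitor.GeneralMonitorPlugin):
    """Sketch of a concrete monitor plugin; every override below is a stub."""

    def filter_allocations(self, reservation, resource_ids):
        # Keep only allocations that point at one of the affected resources.
        return [alloc for alloc in reservation.get('allocations', [])
                if alloc.get('resource_id') in resource_ids]

    def get_reservations_by_resource_ids(self, resource_ids,
                                         interval_begin, interval_end):
        return []  # a real plugin would query the Blazar DB here

    def get_unreservable_resourses(self):
        return []  # resources currently flagged as not reservable

    def poll_resource_failures(self):
        return [], []  # (failed_resources, recovered_resources)

    def set_reservable(self, resource_id, is_reservable):
        pass  # a real plugin would update the resource's reservable flag


# resource_type is stored by __new__ and selects the CONF[<resource_type>]
# option group used by the is_*_enabled()/interval helpers.
plugin = ExampleMonitorPlugin(resource_type='example_resource')
```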
#### File: plugins/networks/test_network_plugin.py
```python
import datetime
import uuid
from oslo_config import cfg
from unittest import mock
from blazar import context
from blazar.db import api as db_api
from blazar.db import exceptions as db_exceptions
from blazar.db import utils as db_utils
from blazar.manager import exceptions as manager_exceptions
from blazar.manager import service
from blazar.plugins import networks as plugin
from blazar.plugins.networks import network_plugin
from blazar import tests
from blazar.utils.openstack import base
from blazar.utils.openstack import ironic
from blazar.utils.openstack import neutron
from blazar.utils.openstack import nova
from blazar.utils import trusts
CONF = cfg.CONF
class NetworkPluginTestCase(tests.TestCase):
def setUp(self):
super(NetworkPluginTestCase, self).setUp()
self.cfg = cfg
self.context = context
self.patch(self.context, 'BlazarContext')
self.ironic_client = (
self.patch(ironic, 'BlazarIronicClient').return_value)
self.neutron_client = (
self.patch(neutron, 'BlazarNeutronClient').return_value)
self.service = service
self.manager = self.service.ManagerService()
self.fake_network_id = 'e3ed59f3-27e6-48df-b8bd-2a397aeb57dc'
self.fake_network_values = {
'network_type': 'vlan',
'physical_network': 'physnet1',
'segment_id': 1234
}
self.fake_network = self.fake_network_values.copy()
self.fake_network['id'] = self.fake_network_id
self.patch(base, 'url_for').return_value = 'http://foo.bar'
self.network_plugin = network_plugin
self.fake_network_plugin = self.network_plugin.NetworkPlugin()
self.db_api = db_api
self.db_utils = db_utils
self.db_network_get = self.patch(self.db_api, 'network_get')
self.db_network_get.return_value = self.fake_network
self.db_network_list = self.patch(self.db_api, 'network_list')
self.db_network_create = self.patch(self.db_api, 'network_create')
self.db_network_update = self.patch(self.db_api, 'network_update')
self.db_network_destroy = self.patch(self.db_api, 'network_destroy')
self.db_network_extra_capability_get_all_per_network = self.patch(
self.db_api, 'network_extra_capability_get_all_per_network')
self.db_network_extra_capability_get_all_per_name = self.patch(
self.db_api, 'network_extra_capability_get_all_per_name')
self.db_network_extra_capability_create = self.patch(
self.db_api, 'network_extra_capability_create')
self.db_network_extra_capability_update = self.patch(
self.db_api, 'network_extra_capability_update')
self.get_extra_capabilities = self.patch(
self.fake_network_plugin, '_get_extra_capabilities')
self.get_extra_capabilities.return_value = {
'foo': 'bar',
'buzz': 'word',
}
self.fake_network_plugin.setup(None)
self.trusts = trusts
self.trust_ctx = self.patch(self.trusts, 'create_ctx_from_trust')
self.trust_create = self.patch(self.trusts, 'create_trust')
self.ServerManager = nova.ServerManager
def test_get_network(self):
network = self.fake_network_plugin.get_network(self.fake_network_id)
self.db_network_get.assert_called_once_with(self.fake_network_id)
expected = self.fake_network.copy()
expected.update({'foo': 'bar', 'buzz': 'word'})
self.assertEqual(expected, network)
def test_get_network_without_extracapabilities(self):
self.get_extra_capabilities.return_value = {}
network = self.fake_network_plugin.get_network(self.fake_network_id)
self.db_network_get.assert_called_once_with(self.fake_network_id)
self.assertEqual(self.fake_network, network)
def test_list_networks(self):
self.fake_network_plugin.list_networks()
self.db_network_list.assert_called_once_with()
def test_create_network_without_extra_capabilities(self):
network_values = {
'network_type': 'vlan',
'physical_network': 'physnet1',
'segment_id': 1234
}
expected_network_values = network_values.copy()
self.get_extra_capabilities.return_value = {}
network = self.fake_network_plugin.create_network(network_values)
self.db_network_create.assert_called_once_with(expected_network_values)
self.assertEqual(network, self.fake_network)
def test_create_network_with_extra_capabilities(self):
fake_network = self.fake_network.copy()
fake_network.update({'foo': 'bar'})
# NOTE(sbauza): 'id' will be pop'd, we need to keep track of it
fake_request = fake_network.copy()
fake_capa = {'network_id': self.fake_network_id,
'capability_name': 'foo',
'capability_value': 'bar',
}
self.get_extra_capabilities.return_value = {'foo': 'bar'}
self.db_network_create.return_value = self.fake_network
network = self.fake_network_plugin.create_network(fake_request)
self.db_network_create.assert_called_once_with(
self.fake_network_values)
self.db_network_extra_capability_create.assert_called_once_with(
fake_capa)
self.assertEqual(network, fake_network)
def test_create_network_with_capabilities_too_long(self):
fake_network = self.fake_network_values.copy()
fake_network.update({'foo': 'bar'})
# NOTE(sbauza): 'id' will be pop'd, we need to keep track of it
fake_request = fake_network.copy()
long_key = ""
for i in range(65):
long_key += "0"
fake_request[long_key] = "foo"
self.db_network_create.return_value = self.fake_network
self.assertRaises(manager_exceptions.ExtraCapabilityTooLong,
self.fake_network_plugin.create_network,
fake_request)
def test_create_network_without_required_params(self):
self.assertRaises(manager_exceptions.MissingParameter,
self.fake_network_plugin.create_network,
{'network_type': 'vlan',
'physical_network': 'physnet1'})
def test_create_network_with_invalid_segment_id(self):
self.assertRaises(manager_exceptions.MalformedParameter,
self.fake_network_plugin.create_network,
{'network_type': 'vlan',
'physical_network': 'physnet1',
'segment_id': 0})
self.assertRaises(manager_exceptions.MalformedParameter,
self.fake_network_plugin.create_network,
{'network_type': 'vlan',
'physical_network': 'physnet1',
'segment_id': 4095})
def test_create_network_issuing_rollback(self):
def fake_db_network_create(*args, **kwargs):
raise db_exceptions.BlazarDBException
self.db_network_create.side_effect = fake_db_network_create
self.assertRaises(db_exceptions.BlazarDBException,
self.fake_network_plugin.create_network,
self.fake_network)
def test_create_duplicate_network(self):
def fake_db_network_create(*args, **kwargs):
raise db_exceptions.BlazarDBDuplicateEntry
self.db_network_create.side_effect = fake_db_network_create
self.assertRaises(db_exceptions.BlazarDBDuplicateEntry,
self.fake_network_plugin.create_network,
self.fake_network)
def test_create_network_having_issue_when_storing_extra_capability(self):
def fake_db_network_extra_capability_create(*args, **kwargs):
raise db_exceptions.BlazarDBException
fake_network = self.fake_network_values.copy()
fake_network.update({'foo': 'bar'})
fake_request = fake_network.copy()
self.get_extra_capabilities.return_value = {'foo': 'bar'}
self.db_network_create.return_value = self.fake_network
fake = self.db_network_extra_capability_create
fake.side_effect = fake_db_network_extra_capability_create
self.assertRaises(manager_exceptions.CantAddExtraCapability,
self.fake_network_plugin.create_network,
fake_request)
def test_update_network(self):
network_values = {'segment_id': 2345}
self.fake_network_plugin.update_network(self.fake_network_id,
network_values)
self.db_network_update.assert_called_once_with(
self.fake_network_id, network_values)
def test_update_network_extra_capabilities(self):
network_values = {'foo': 'baz'}
self.db_network_extra_capability_get_all_per_name.return_value = [
({'id': 'extra_id1',
'network_id': self.fake_network_id,
'capability_value': 'bar'},
'foo')
]
self.get_reservations_by_network = self.patch(
self.db_utils, 'get_reservations_by_network_id')
self.get_reservations_by_network.return_value = []
self.fake_network_plugin.update_network(self.fake_network_id,
network_values)
self.db_network_extra_capability_update.assert_called_once_with(
'extra_id1', {'capability_name': 'foo', 'capability_value': 'baz'})
def test_update_network_having_issue_when_storing_extra_capability(self):
def fake_db_network_extra_capability_update(*args, **kwargs):
raise RuntimeError
network_values = {'foo': 'baz'}
self.get_reservations_by_network = self.patch(
self.db_utils, 'get_reservations_by_network_id')
self.get_reservations_by_network.return_value = []
self.db_network_extra_capability_get_all_per_name.return_value = [
({'id': 'extra_id1',
'network_id': self.fake_network_id,
'capability_value': 'bar'},
'foo')
]
fake = self.db_network_extra_capability_update
fake.side_effect = fake_db_network_extra_capability_update
self.assertRaises(manager_exceptions.CantAddExtraCapability,
self.fake_network_plugin.update_network,
self.fake_network_id, network_values)
def test_update_network_with_new_extra_capability(self):
network_values = {'qux': 'word'}
self.db_network_extra_capability_get_all_per_network.return_value = []
self.fake_network_plugin.update_network(self.fake_network_id,
network_values)
self.db_network_extra_capability_create.assert_called_once_with({
'network_id': self.fake_network_id,
'capability_name': 'qux',
'capability_value': 'word'
})
def test_update_network_with_used_capability(self):
network_values = {'foo': 'buzz'}
self.db_network_extra_capability_get_all_per_name.return_value = [
({'id': 'extra_id1',
'network_id': self.fake_network_id,
'capability_value': 'bar'},
'foo')
]
fake_network_reservation = {
'resource_type': plugin.RESOURCE_TYPE,
'resource_id': 'resource-1',
}
fake_get_reservations = self.patch(self.db_utils,
'get_reservations_by_network_id')
fake_get_reservations.return_value = [fake_network_reservation]
fake_get_plugin_reservation = self.patch(self.db_utils,
'get_plugin_reservation')
fake_get_plugin_reservation.return_value = {
'resource_properties': '["==", "$foo", "bar"]'
}
self.assertRaises(manager_exceptions.CantAddExtraCapability,
self.fake_network_plugin.update_network,
self.fake_network_id, network_values)
fake_get_plugin_reservation.assert_called_once_with(
plugin.RESOURCE_TYPE, 'resource-1')
def test_delete_network(self):
network_allocation_get_all = self.patch(
self.db_api,
'network_allocation_get_all_by_values')
network_allocation_get_all.return_value = []
self.fake_network_plugin.delete_network(self.fake_network_id)
self.db_network_destroy.assert_called_once_with(self.fake_network_id)
def test_delete_reserved_network(self):
network_allocation_get_all = self.patch(
self.db_api,
'network_allocation_get_all_by_values')
network_allocation_get_all.return_value = [
{
'id': u'dd305477-4df8-4547-87f6-69069ee546a6',
'network_id': self.fake_network_id
}
]
self.assertRaises(manager_exceptions.CantDeleteNetwork,
self.fake_network_plugin.delete_network,
self.fake_network_id)
def test_delete_network_not_existing_in_db(self):
self.db_network_get.return_value = None
self.assertRaises(manager_exceptions.NetworkNotFound,
self.fake_network_plugin.delete_network,
self.fake_network_id)
def test_delete_network_issuing_rollback(self):
def fake_db_network_destroy(*args, **kwargs):
raise db_exceptions.BlazarDBException
network_allocation_get_all = self.patch(
self.db_api,
'network_allocation_get_all_by_values')
network_allocation_get_all.return_value = []
self.db_network_destroy.side_effect = fake_db_network_destroy
self.assertRaises(manager_exceptions.CantDeleteNetwork,
self.fake_network_plugin.delete_network,
self.fake_network_id)
def generate_event(self, id, lease_id, event_type, time, status='UNDONE'):
return {
'id': id,
'lease_id': lease_id,
'event_type': event_type,
'time': time,
'status': status
}
def get_uuid(self):
return str(uuid.uuid4())
def generate_basic_events(self, lease_id, start, before_end, end):
return [
self.generate_event(self.get_uuid(), lease_id, 'start_lease',
datetime.datetime.strptime(start,
'%Y-%m-%d %H:%M')),
self.generate_event(self.get_uuid(), lease_id, 'before_end_lease',
datetime.datetime.strptime(before_end,
'%Y-%m-%d %H:%M')),
self.generate_event(self.get_uuid(), lease_id, 'end_lease',
datetime.datetime.strptime(end,
'%Y-%m-%d %H:%M')),
]
def test_create_reservation_no_network_available(self):
now = datetime.datetime.utcnow()
lease = {
'id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'user_id': '123',
'project_id': '456',
}
values = {
'lease_id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'start_date': now,
'end_date': now + datetime.timedelta(hours=1),
'resource_type': plugin.RESOURCE_TYPE,
'network_name': 'foo-net',
'network_properties': '',
'resource_properties': '',
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = lease
network_reservation_create = self.patch(self.db_api,
'network_reservation_create')
matching_networks = self.patch(self.fake_network_plugin,
'_matching_networks')
matching_networks.return_value = []
self.assertRaises(manager_exceptions.NotEnoughNetworksAvailable,
self.fake_network_plugin.reserve_resource,
u'f9894fcf-e2ed-41e9-8a4c-92fac332608e',
values)
network_reservation_create.assert_not_called()
def test_create_reservation_networks_available(self):
lease = {
'id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'user_id': '123',
'project_id': '456',
}
values = {
'lease_id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'network_properties': '',
'resource_properties': '',
'start_date': datetime.datetime(2013, 12, 19, 20, 00),
'end_date': datetime.datetime(2013, 12, 19, 21, 00),
'resource_type': plugin.RESOURCE_TYPE,
'network_name': 'foo-net',
'network_description': ''
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = lease
network_reservation_create = self.patch(self.db_api,
'network_reservation_create')
matching_networks = self.patch(self.fake_network_plugin,
'_matching_networks')
matching_networks.return_value = ['network1', 'network2']
network_allocation_create = self.patch(
self.db_api,
'network_allocation_create')
self.fake_network_plugin.reserve_resource(
u'441c1476-9f8f-4700-9f30-cd9b6fef3509',
values)
network_values = {
'reservation_id': u'441c1476-9f8f-4700-9f30-cd9b6fef3509',
'network_properties': '',
'resource_properties': '',
'status': 'pending',
'before_end': 'default',
'network_name': 'foo-net',
'network_description': '',
}
network_reservation_create.assert_called_once_with(network_values)
calls = [
mock.call(
{'network_id': 'network1',
'reservation_id': u'441c1476-9f8f-4700-9f30-cd9b6fef3509',
}),
]
network_allocation_create.assert_has_calls(calls)
def test_create_reservation_with_missing_param_properties(self):
values = {
'lease_id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'start_date': datetime.datetime(2017, 3, 1, 20, 00),
'end_date': datetime.datetime(2017, 3, 2, 20, 00),
'resource_type': plugin.RESOURCE_TYPE,
}
self.assertRaises(
manager_exceptions.MissingParameter,
self.fake_network_plugin.reserve_resource,
u'441c1476-9f8f-4700-9f30-cd9b6fef3509',
values)
def test_create_reservation_with_invalid_param_before_end(self):
values = {
'lease_id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'before_end': 'invalid',
'start_date': datetime.datetime(2017, 3, 1, 20, 00),
'end_date': datetime.datetime(2017, 3, 2, 20, 00),
'resource_type': plugin.RESOURCE_TYPE,
'network_name': 'foo-net',
'network_properties': '',
'resource_properties': '',
}
self.assertRaises(
manager_exceptions.MalformedParameter,
self.fake_network_plugin.reserve_resource,
u'441c1476-9f8f-4700-9f30-cd9b6fef3509',
values)
def test_update_reservation_shorten(self):
values = {
'start_date': datetime.datetime(2013, 12, 19, 20, 30),
'end_date': datetime.datetime(2013, 12, 19, 21, 00)
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': u'10870923-6d56-45c9-b592-f788053f5baa',
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2013, 12, 19, 20, 00),
'end_date': datetime.datetime(2013, 12, 19, 21, 00)
}
network_reservation_get = self.patch(
self.db_api, 'network_reservation_get')
self.patch(self.db_api, 'network_allocation_get_all_by_values')
self.fake_network_plugin.update_reservation(
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
network_reservation_get.assert_not_called()
def test_update_reservation_extend(self):
values = {
'start_date': datetime.datetime(2013, 12, 19, 20, 00),
'end_date': datetime.datetime(2013, 12, 19, 21, 30)
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': u'10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': u'91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'pending'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2013, 12, 19, 20, 00),
'end_date': datetime.datetime(2013, 12, 19, 21, 00)
}
network_reservation_get = self.patch(
self.db_api, 'network_reservation_get')
network_reservation_get.return_value = {
'network_properties': '["=", "$network_type", "vlan"]',
'resource_properties': ''
}
network_allocation_get_all = self.patch(
self.db_api,
'network_allocation_get_all_by_values')
network_allocation_get_all.return_value = [
{
'id': u'dd305477-4df8-4547-87f6-69069ee546a6',
'network_id': 'network1'
}
]
network_get_all_by_queries = self.patch(
self.db_api, 'network_get_all_by_queries')
network_get_all_by_queries.return_value = [{'id': 'network1'}]
get_reserved_periods = self.patch(self.db_utils,
'get_reserved_periods')
get_reserved_periods.return_value = [
(datetime.datetime(2013, 12, 19, 20, 00),
datetime.datetime(2013, 12, 19, 21, 00))
]
network_allocation_create = self.patch(
self.db_api,
'network_allocation_create')
network_allocation_destroy = self.patch(
self.db_api,
'network_allocation_destroy')
self.fake_network_plugin.update_reservation(
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
network_allocation_create.assert_not_called()
network_allocation_destroy.assert_not_called()
def test_update_reservation_move_failure(self):
values = {
'start_date': datetime.datetime(2013, 12, 20, 20, 00),
'end_date': datetime.datetime(2013, 12, 20, 21, 30)
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': u'10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': u'91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'active'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2013, 12, 19, 20, 00),
'end_date': datetime.datetime(2013, 12, 19, 21, 00)
}
network_reservation_get = self.patch(
self.db_api,
'network_reservation_get')
network_reservation_get.return_value = {
'network_properties': '["=", "$network_type", "vlan"]',
'resource_properties': ''
}
network_allocation_get_all = self.patch(
self.db_api,
'network_allocation_get_all_by_values')
network_allocation_get_all.return_value = [
{
'id': u'dd305477-4df8-4547-87f6-69069ee546a6',
'network_id': 'network1'
}
]
network_get_all_by_queries = self.patch(self.db_api,
'network_get_all_by_queries')
network_get_all_by_queries.return_value = [{'id': 'network1'}]
get_reserved_periods = self.patch(self.db_utils,
'get_reserved_periods')
get_reserved_periods.return_value = [
(datetime.datetime(2013, 12, 20, 20, 30),
datetime.datetime(2013, 12, 20, 21, 00))
]
matching_networks = self.patch(
self.fake_network_plugin, '_matching_networks')
matching_networks.return_value = []
self.assertRaises(
manager_exceptions.NotEnoughNetworksAvailable,
self.fake_network_plugin.update_reservation,
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
reservation_get.assert_called()
def test_update_reservation_move_overlap(self):
values = {
'start_date': datetime.datetime(2013, 12, 19, 20, 30),
'end_date': datetime.datetime(2013, 12, 19, 21, 30)
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': u'10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': u'91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'pending'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2013, 12, 19, 20, 00),
'end_date': datetime.datetime(2013, 12, 19, 21, 00)
}
network_reservation_get = self.patch(
self.db_api,
'network_reservation_get')
network_reservation_get.return_value = {
'network_properties': '["=", "$network_type", "vlan"]',
'resource_properties': ''
}
network_allocation_get_all = self.patch(
self.db_api,
'network_allocation_get_all_by_values')
network_allocation_get_all.return_value = [
{
'id': u'dd305477-4df8-4547-87f6-69069ee546a6',
'network_id': 'network1'
}
]
network_get_all_by_queries = self.patch(self.db_api,
'network_get_all_by_queries')
network_get_all_by_queries.return_value = [{'id': 'network1'}]
get_reserved_periods = self.patch(self.db_utils,
'get_reserved_periods')
get_reserved_periods.return_value = [
(datetime.datetime(2013, 12, 19, 20, 30),
datetime.datetime(2013, 12, 19, 21, 00))
]
network_allocation_create = self.patch(
self.db_api,
'network_allocation_create')
network_allocation_destroy = self.patch(
self.db_api,
'network_allocation_destroy')
self.fake_network_plugin.update_reservation(
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
network_allocation_create.assert_not_called()
network_allocation_destroy.assert_not_called()
def test_update_reservation_move_realloc(self):
values = {
'start_date': datetime.datetime(2013, 12, 20, 20, 00),
'end_date': datetime.datetime(2013, 12, 20, 21, 30)
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': u'10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': u'91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'pending'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2013, 12, 19, 20, 00),
'end_date': datetime.datetime(2013, 12, 19, 21, 00)
}
network_reservation_get = self.patch(
self.db_api,
'network_reservation_get')
network_reservation_get.return_value = {
'network_properties': '["=", "$network_type", "vlan"]',
'resource_properties': ''
}
network_allocation_get_all = self.patch(
self.db_api,
'network_allocation_get_all_by_values')
network_allocation_get_all.return_value = [
{
'id': u'dd305477-4df8-4547-87f6-69069ee546a6',
'network_id': 'network1'
}
]
network_get_all_by_queries = self.patch(self.db_api,
'network_get_all_by_queries')
network_get_all_by_queries.return_value = [{'id': 'network1'},
{'id': 'network2'}]
network_allocation_create = self.patch(
self.db_api,
'network_allocation_create')
network_allocation_destroy = self.patch(
self.db_api,
'network_allocation_destroy')
get_reserved_periods = self.patch(self.db_utils,
'get_reserved_periods')
get_reserved_periods.return_value = [
(datetime.datetime(2013, 12, 20, 20, 30),
datetime.datetime(2013, 12, 20, 21, 00))
]
matching_networks = self.patch(
self.fake_network_plugin, '_matching_networks')
matching_networks.return_value = ['network2']
self.fake_network_plugin.update_reservation(
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
network_reservation_get.assert_called_with(
u'91253650-cc34-4c4f-bbe8-c943aa7d0c9b')
network_allocation_destroy.assert_called_with(
'dd305477-4df8-4547-87f6-69069ee546a6')
network_allocation_create.assert_called_with(
{
'network_id': 'network2',
'reservation_id': '706eb3bc-07ed-4383-be93-b32845ece672'
}
)
def test_update_reservation_realloc_with_properties_change(self):
values = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00),
'network_properties': '["=", "$network_type", "vlan"]',
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': '10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'pending'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00)
}
network_reservation_get = self.patch(
self.db_api, 'network_reservation_get')
network_reservation_get.return_value = {
'id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'network_properties': '["=", "$network_type", "vlan"]',
'resource_properties': ''
}
network_allocation_get_all = self.patch(
self.db_api, 'network_allocation_get_all_by_values')
network_allocation_get_all.return_value = [
{
'id': 'dd305477-4df8-4547-87f6-69069ee546a6',
'network_id': 'network1'
}
]
network_get_all_by_queries = self.patch(self.db_api,
'network_get_all_by_queries')
network_get_all_by_queries.return_value = [{'id': 'network2'}]
matching_networks = self.patch(
self.fake_network_plugin, '_matching_networks')
matching_networks.return_value = ['network2']
network_allocation_create = self.patch(self.db_api,
'network_allocation_create')
network_allocation_destroy = self.patch(self.db_api,
'network_allocation_destroy')
network_reservation_update = self.patch(self.db_api,
'network_reservation_update')
self.fake_network_plugin.update_reservation(
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
network_reservation_get.assert_called_with(
'91253650-cc34-4c4f-bbe8-c943aa7d0c9b')
matching_networks.assert_called_with(
'["=", "$network_type", "vlan"]',
'',
datetime.datetime(2017, 7, 12, 20, 00),
datetime.datetime(2017, 7, 12, 21, 00)
)
network_allocation_create.assert_called_with(
{
'network_id': 'network2',
'reservation_id': '706eb3bc-07ed-4383-be93-b32845ece672'
}
)
network_allocation_destroy.assert_called_with(
'dd305477-4df8-4547-87f6-69069ee546a6'
)
network_reservation_update.assert_called_with(
'91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
{'network_properties': '["=", "$network_type", "vlan"]'}
)
def test_update_reservation_no_requested_networks_available(self):
values = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00),
'resource_properties': '["=", "$segment_id", "2345"]'
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': '10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'pending'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2013, 12, 19, 20, 00),
'end_date': datetime.datetime(2013, 12, 19, 21, 00)
}
network_reservation_get = self.patch(
self.db_api, 'network_reservation_get')
network_reservation_get.return_value = {
'id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'network_properties': '["=", "$network_type", "vlan"]',
'resource_properties': ''
}
network_allocation_get_all = self.patch(
self.db_api, 'network_allocation_get_all_by_values')
network_allocation_get_all.return_value = [
{
'id': 'dd305477-4df8-4547-87f6-69069ee546a6',
'network_id': 'network1'
}
]
network_get_all_by_queries = self.patch(self.db_api,
'network_get_all_by_queries')
network_get_all_by_queries.return_value = []
matching_networks = self.patch(
self.fake_network_plugin, '_matching_networks')
matching_networks.return_value = []
self.assertRaises(
manager_exceptions.NotEnoughNetworksAvailable,
self.fake_network_plugin.update_reservation,
'441c1476-9f8f-4700-9f30-cd9b6fef3509',
values)
def test_on_start(self):
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'project_id': '456'
}
reservation_get = self.patch(
self.db_api, 'reservation_get')
reservation_get.return_value = {
'id': u'593e7028-c0d1-4d76-8642-2ffd890b324c',
'lease_id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
}
network_reservation_get = self.patch(
self.db_api, 'network_reservation_get')
network_reservation_get.return_value = {
'id': '04de74e8-193a-49d2-9ab8-cba7b49e45e8',
'network_id': None,
'network_name': 'foo-net',
'network_description': None,
'reservation_id': u'593e7028-c0d1-4d76-8642-2ffd890b324c'
}
network_allocation_get_all_by_values = self.patch(
self.db_api, 'network_allocation_get_all_by_values')
network_allocation_get_all_by_values.return_value = [
{'network_id': 'network1'},
]
network_get = self.patch(self.db_api, 'network_get')
network_get.return_value = {
'network_id': 'network1',
'network_type': 'vlan',
'physical_network': 'physnet1',
'segment_id': 1234
}
create_network = self.patch(self.neutron_client, 'create_network')
create_network.return_value = {
'network': {
'id': '69cab064-0e60-4efb-a503-b42dde0fb3f2',
'name': 'foo-net'
}
}
network_reservation_update = self.patch(
self.db_api,
'network_reservation_update')
self.fake_network_plugin.on_start(
u'04de74e8-193a-49d2-9ab8-cba7b49e45e8')
create_network.assert_called_with(
body={
'network': {
'provider:segmentation_id': 1234,
'name': 'foo-net',
'project_id': '456',
'provider:physical_network': 'physnet1',
'provider:network_type': 'vlan'}})
network_reservation_update.assert_called_with(
'04de74e8-193a-49d2-9ab8-cba7b49e45e8',
{'network_id': '69cab064-0e60-4efb-a503-b42dde0fb3f2'})
def test_on_start_failure(self):
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'project_id': '456'
}
reservation_get = self.patch(
self.db_api, 'reservation_get')
reservation_get.return_value = {
'id': u'593e7028-c0d1-4d76-8642-2ffd890b324c',
'lease_id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
}
network_reservation_get = self.patch(
self.db_api, 'network_reservation_get')
network_reservation_get.return_value = {
'id': '04de74e8-193a-49d2-9ab8-cba7b49e45e8',
'network_id': None,
'network_name': 'foo-net',
'network_description': None,
'reservation_id': u'593e7028-c0d1-4d76-8642-2ffd890b324c'
}
network_allocation_get_all_by_values = self.patch(
self.db_api, 'network_allocation_get_all_by_values')
network_allocation_get_all_by_values.return_value = [
{'network_id': 'network1'},
]
network_get = self.patch(self.db_api, 'network_get')
network_get.return_value = {
'network_id': 'network1',
'network_type': 'vlan',
'physical_network': 'physnet1',
'segment_id': 1234
}
def fake_create_network(*args, **kwargs):
raise manager_exceptions.NetworkCreationFailed
create_network = self.patch(self.neutron_client, 'create_network')
create_network.side_effect = fake_create_network
self.assertRaises(manager_exceptions.NetworkCreationFailed,
self.fake_network_plugin.on_start,
'04de74e8-193a-49d2-9ab8-cba7b49e45e8')
def test_on_end(self):
network_reservation_get = self.patch(
self.db_api, 'network_reservation_get')
network_reservation_get.return_value = {
'id': '04de74e8-193a-49d2-9ab8-cba7b49e45e8',
'network_id': '69cab064-0e60-4efb-a503-b42dde0fb3f2',
'network_name': 'foo-net',
'reservation_id': u'593e7028-c0d1-4d76-8642-2ffd890b324c'
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'id': u'593e7028-c0d1-4d76-8642-2ffd890b324c',
'lease_id': '10870923-6d56-45c9-b592-f788053f5baa',
'status': 'active'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'trust_id': 'exxee111qwwwwe'
}
network_reservation_update = self.patch(
self.db_api,
'network_reservation_update')
network_allocation_get_all_by_values = self.patch(
self.db_api,
'network_allocation_get_all_by_values')
network_allocation_get_all_by_values.return_value = [
{'id': u'bfa9aa0b-8042-43eb-a4e6-4555838bf64f',
'network_id': u'cdae2a65-236f-475a-977d-f6ad82f828b7',
},
]
network_allocation_destroy = self.patch(
self.db_api,
'network_allocation_destroy')
delete_network = self.patch(self.neutron_client, 'delete_network')
delete_network.return_value = None
self.fake_network_plugin.on_end(
u'04de74e8-193a-49d2-9ab8-cba7b49e45e8')
network_reservation_update.assert_called_with(
u'04de74e8-193a-49d2-9ab8-cba7b49e45e8', {'status': 'completed'})
network_allocation_destroy.assert_called_with(
u'bfa9aa0b-8042-43eb-a4e6-4555838bf64f')
delete_network.assert_called_with(
'69cab064-0e60-4efb-a503-b42dde0fb3f2')
def test_on_end_failure(self):
network_reservation_get = self.patch(
self.db_api, 'network_reservation_get')
network_reservation_get.return_value = {
'id': '04de74e8-193a-49d2-9ab8-cba7b49e45e8',
'network_id': '69cab064-0e60-4efb-a503-b42dde0fb3f2',
'network_name': 'foo-net',
'reservation_id': u'593e7028-c0d1-4d76-8642-2ffd890b324c'
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'id': u'593e7028-c0d1-4d76-8642-2ffd890b324c',
'lease_id': '10870923-6d56-45c9-b592-f788053f5baa',
'status': 'active'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'trust_id': 'exxee111qwwwwe'
}
network_reservation_update = self.patch(
self.db_api,
'network_reservation_update')
network_allocation_get_all_by_values = self.patch(
self.db_api,
'network_allocation_get_all_by_values')
network_allocation_get_all_by_values.return_value = [
{'id': u'bfa9aa0b-8042-43eb-a4e6-4555838bf64f',
'network_id': u'cdae2a65-236f-475a-977d-f6ad82f828b7',
},
]
network_allocation_destroy = self.patch(
self.db_api,
'network_allocation_destroy')
def fake_delete_network(*args, **kwargs):
raise manager_exceptions.NetworkDeletionFailed
delete_network = self.patch(self.neutron_client, 'delete_network')
delete_network.side_effect = fake_delete_network
self.assertRaises(manager_exceptions.NetworkDeletionFailed,
self.fake_network_plugin.on_end,
'04de74e8-193a-49d2-9ab8-cba7b49e45e8')
network_reservation_update.assert_called_with(
u'04de74e8-193a-49d2-9ab8-cba7b49e45e8', {'status': 'completed'})
network_allocation_destroy.assert_called_with(
u'bfa9aa0b-8042-43eb-a4e6-4555838bf64f')
delete_network.assert_called_with(
'69cab064-0e60-4efb-a503-b42dde0fb3f2')
def test_list_resource_properties(self):
self.db_list_resource_properties = self.patch(
self.db_api, 'resource_properties_list')
        # Expecting a list of (capability_name, private, value) tuples
self.db_list_resource_properties.return_value = [
('prop1', False, 'aaa'),
('prop1', False, 'bbb'),
('prop2', False, 'aaa'),
('prop2', False, 'aaa'),
('prop3', True, 'aaa')
]
expected = [
{'property': 'prop1'},
{'property': 'prop2'}
]
ret = self.fake_network_plugin.list_resource_properties(query=None)
# Sort returned value to use assertListEqual
ret.sort(key=lambda x: x['property'])
self.assertListEqual(expected, ret)
self.db_list_resource_properties.assert_called_once_with(
'network')
def test_list_resource_properties_with_detail(self):
self.db_list_resource_properties = self.patch(
self.db_api, 'resource_properties_list')
        # Expecting a list of (capability_name, private, value) tuples
self.db_list_resource_properties.return_value = [
('prop1', False, 'aaa'),
('prop1', False, 'bbb'),
('prop2', False, 'ccc'),
('prop3', True, 'aaa')
]
expected = [
{'property': 'prop1', 'private': False, 'values': ['aaa', 'bbb']},
{'property': 'prop2', 'private': False, 'values': ['ccc']}
]
ret = self.fake_network_plugin.list_resource_properties(
query={'detail': True})
# Sort returned value to use assertListEqual
ret.sort(key=lambda x: x['property'])
self.assertListEqual(expected, ret)
self.db_list_resource_properties.assert_called_once_with(
'network')
def test_update_resource_property(self):
resource_property_values = {
'resource_type': 'network',
'private': False}
db_resource_property_update = self.patch(
self.db_api, 'resource_property_update')
self.fake_network_plugin.update_resource_property(
'foo', resource_property_values)
db_resource_property_update.assert_called_once_with(
'network', 'foo', resource_property_values)
```
#### File: utils/openstack/heat.py
```python
from heatclient import client as heat_client
from keystoneauth1 import session
from keystoneauth1 import token_endpoint
from oslo_config import cfg
from oslo_log import log as logging
from blazar import context
from blazar.utils.openstack import base
heat_opts = [
cfg.StrOpt(
'heat_api_version',
default='1',
deprecated_group='DEFAULT',
help='Heat API version'),
cfg.StrOpt(
'orchestration_service',
default='orchestration',
deprecated_group='DEFAULT',
help='Heat name in keystone')
]
CONF = cfg.CONF
CONF.register_opts(heat_opts, group='heat')
CONF.import_opt('identity_service', 'blazar.utils.openstack.keystone')
LOG = logging.getLogger(__name__)
class BlazarHeatClient(object):
def __init__(self, ctx=None):
if ctx is None:
ctx = context.current()
endpoint_override = base.url_for(
ctx.service_catalog,
CONF.heat.orchestration_service,
os_region_name=CONF.os_region_name)
auth = token_endpoint.Token(endpoint_override, ctx.auth_token)
sess = session.Session(auth=auth)
self.heat = heat_client.Client(
CONF.heat.heat_api_version, session=sess)
    def __getattr__(self, name):
return getattr(self.heat, name)
```
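
A brief, hedged usage sketch: the wrapper needs an active Blazar request context (so `context.current()` resolves and the service catalog contains a Heat endpoint), and attribute access is delegated to the underlying heatclient instance through `__getattr__`, so the usual `stacks` manager is reachable directly on the wrapper.

```python
from blazar.utils.openstack import heat

# Assumes this runs inside a Blazar request context with a populated
# service catalog; the constructor resolves the Heat endpoint from it.
client = heat.BlazarHeatClient()
# Delegated to heatclient: list stacks visible to the context's project.
for stack in client.stacks.list():
    print(stack.stack_name, stack.stack_status)
```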
#### File: utils/openstack/neutron.py
```python
import netaddr
from neutronclient.common import exceptions as neutron_exceptions
from neutronclient.v2_0 import client as neutron_client
from oslo_log import log as logging
from blazar.utils.openstack import base
from blazar.utils.openstack import exceptions
LOG = logging.getLogger(__name__)
class BlazarNeutronClient(object):
"""Client class for Neutron service."""
def __init__(self, **kwargs):
client_kwargs = base.client_kwargs(**kwargs)
self.neutron = neutron_client.Client(**client_kwargs)
def __getattr__(self, attr):
return getattr(self.neutron, attr)
class FloatingIPPool(BlazarNeutronClient):
def __init__(self, network_id, **kwargs):
super(FloatingIPPool, self).__init__(**kwargs)
try:
self.neutron.show_network(network_id)
except neutron_exceptions.NotFound:
LOG.info('Failed to find network %s.', network_id)
raise exceptions.FloatingIPNetworkNotFound(network=network_id)
self.network_id = network_id
def fetch_subnet(self, floatingip):
fip = netaddr.IPAddress(floatingip)
network = self.neutron.show_network(self.network_id)['network']
subnet_ids = network['subnets']
for sub_id in subnet_ids:
subnet = self.neutron.show_subnet(sub_id)['subnet']
cidr = netaddr.IPNetwork(subnet['cidr'])
            # skip the subnet because its cidr does not contain the floating ip
if fip not in cidr:
continue
allocated_ip = netaddr.IPSet()
allocated_ip.add(netaddr.IPAddress(subnet['gateway_ip']))
for alloc in subnet['allocation_pools']:
allocated_ip.add(netaddr.IPRange(alloc['start'], alloc['end']))
if fip in allocated_ip:
raise exceptions.NeutronUsesFloatingIP(floatingip=fip,
subnet=subnet['id'])
else:
self.subnet_id = subnet['id']
return subnet
raise exceptions.FloatingIPSubnetNotFound(fip=floatingip)
def create_reserved_floatingip(self, subnet_id, address, project_id,
reservation_id):
body = {
'floatingip': {
'floating_network_id': self.network_id,
'subnet_id': subnet_id,
'floating_ip_address': address,
'project_id': project_id
}
}
fip = self.neutron.create_floatingip(body)['floatingip']
body = {
'tags': ['blazar', 'reservation:%s' % reservation_id]
}
self.neutron.replace_tag('floatingips', fip['id'], body)
def delete_reserved_floatingip(self, address):
query = {
'floating_ip_address': address,
'floating_network_id': self.network_id
}
fips = self.neutron.list_floatingips(**query)['floatingips']
if not fips:
            # The floating ip address was already deleted by the user.
return None
fip = next(iter(fips))
if fip['port_id']:
            # Disassociate the floating ip from the attached port because
# the delete floatingip API deletes both the floating ip and
# associated port.
body = {
'floatingip': {
'port_id': None,
}
}
self.neutron.update_floatingip(fip['id'], body)
self.neutron.delete_floatingip(fip['id'])
```
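
A short, hedged sketch of the floating-IP helper in use; the network id, address, project id, and reservation id are placeholders, and valid Neutron credentials are assumed.

```python
from blazar.utils.openstack import neutron

# Placeholder identifiers; a real caller takes these from the reservation.
network_id = '11111111-2222-3333-4444-555555555555'
address = '203.0.113.10'

pool = neutron.FloatingIPPool(network_id)
# Raises NeutronUsesFloatingIP / FloatingIPSubnetNotFound if the address is
# unusable; otherwise returns the subnet the address belongs to.
subnet = pool.fetch_subnet(address)
pool.create_reserved_floatingip(subnet['id'], address,
                                project_id='placeholder-project',
                                reservation_id='placeholder-reservation')
# At the end of the lease the address is released again.
pool.delete_reserved_floatingip(address)
```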
#### File: blazar/utils/trusts.py
```python
from oslo_config import cfg
from blazar import context
from blazar.utils.openstack import keystone
import functools
CONF = cfg.CONF
def create_trust():
"""Creates trust via Keystone API v3 to use in plugins."""
trustee_id = keystone.BlazarKeystoneClient().session.get_user_id()
ctx = context.current()
user_client = keystone.BlazarKeystoneClient(as_user=True)
trust = user_client.trusts.create(trustor_user=ctx.user_id,
trustee_user=trustee_id,
impersonation=True,
role_names=ctx.roles,
project=ctx.project_id)
return trust
def delete_trust(lease):
"""Deletes trust for the specified lease."""
if lease.trust_id:
client = keystone.BlazarKeystoneClient(trust_id=lease.trust_id)
client.trusts.delete(lease.trust_id)
def create_ctx_from_trust(trust_id):
"""Return context built from given trust."""
ctx = context.current()
client = keystone.BlazarKeystoneClient(trust_id=trust_id)
session = client.session
    # use a 'with ctx' statement wherever a context built from the trust is needed
return context.BlazarContext(
user_name=ctx.user_name,
user_domain_name=ctx.user_domain_name,
auth_token=session.get_token(),
project_id=session.get_project_id(),
service_catalog=(
ctx.service_catalog or
session.auth.get_auth_ref(
session=session
).service_catalog.normalize_catalog()),
request_id=ctx.request_id,
global_request_id=ctx.global_request_id
)
def use_trust_auth():
"""Decorator creates a keystone trust
This decorator creates a keystone trust, and adds the trust_id to the
parameter of the decorated method.
"""
def decorator(func):
@functools.wraps(func)
def wrapped(self, to_update):
if to_update is not None:
trust = create_trust()
if isinstance(to_update, dict):
to_update.update({'trust_id': trust.id})
elif isinstance(to_update, object):
setattr(to_update, 'trust_id', trust.id)
return func(self, to_update)
return wrapped
return decorator
``` |
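
A minimal, hedged sketch of the decorator in use: `ExampleManager` is hypothetical, and the call assumes valid keystone credentials plus a current Blazar context so that `create_trust()` can succeed.

```python
from blazar.utils import trusts


class ExampleManager(object):
    @trusts.use_trust_auth()
    def create_lease(self, lease_values):
        # By the time this body runs, the decorator has created a trust for
        # the current user and injected its id into the dict.
        assert 'trust_id' in lease_values
        return lease_values


# Sketch only: requires keystone reachability and an active request context.
manager = ExampleManager()
lease = manager.create_lease({'name': 'example-lease'})
```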
{
"source": "4sfc/django-common-files",
"score": 3
} |
#### File: common_files/admin/timestamp.py
```python
from django.contrib import admin
class TimestampAdmin(admin.ModelAdmin):
"""TimestampAdmin has custom save_formset and save_model functions"""
def save_formset(self, request, form, formset, change):
"""Save created_by and modified_by users."""
instances = formset.save(commit=False)
for obj in formset.deleted_objects:
obj.delete()
for instance in instances:
if instance.pk is None:
instance.created_by = request.user
instance.modified_by = request.user
instance.save()
formset.save_m2m()
def save_model(self, request, obj, form, change):
"""Save created_by and modified_by users."""
if obj.pk is None:
obj.created_by = request.user
obj.modified_by = request.user
super().save_model(request, obj, form, change)
```
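
A short sketch of wiring the mixin to a model admin; `Article` is a hypothetical model that exposes `created_by` and `modified_by` fields.

```python
from django.contrib import admin

from common_files.admin.timestamp import TimestampAdmin
from myapp.models import Article  # hypothetical model with created_by/modified_by


@admin.register(Article)
class ArticleAdmin(TimestampAdmin):
    # Regular ModelAdmin options still apply; the overridden save hooks fill
    # in created_by on first save and modified_by on every save.
    list_display = ('created_by', 'modified_by')
```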
#### File: common_files/models/base.py
```python
from django.db import models
from common_files.models.timestamp import Timestamp
class Base(Timestamp):
"""Abstract base class Base has label, value, and active fields"""
label = models.CharField(max_length=191, unique=True)
value = models.CharField(max_length=10, unique=True)
active = models.BooleanField(default=True, null=False)
def __str__(self):
return self.label
class Meta:
constraints = [
models.UniqueConstraint(fields=['label', 'value'],
name='unique_label_value')
]
abstract = True
``` |
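
A concrete model only needs to inherit from `Base`. The sketch below uses a hypothetical `Language` model; it picks up `label`, `value`, `active`, the `Timestamp` audit fields, and the `__str__` defined above.

```python
from common_files.models.base import Base


class Language(Base):
    """Hypothetical lookup model built on the abstract Base above."""
    pass

# Example use (user is an existing django.contrib.auth User instance):
# Language.objects.create(label='English', value='en',
#                         created_by=user, modified_by=user)
```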
{
"source": "4sfc/django-member-manager",
"score": 3
} |
#### File: member_manager/tests/availability.py
```python
from django.contrib.auth.models import User
from django.test import TestCase
from member_manager.models import Availability
from member_manager.models import Profile
class AvailabilityTest(TestCase):
"""Test Availability model"""
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create(username='foo',
email='<EMAIL>')
cls.profile = Profile.objects.create(first_name='foo',
last_name='bar',
email='<EMAIL>',
user=cls.user,
created_by=cls.user,
modified_by=cls.user)
def test_str(self):
"""Test Availability string"""
period = Availability.objects.create(profile=self.profile,
weekday=Availability.SUNDAY,
start_time=Availability.NINE_AM,
end_time=Availability.ONE_PM,
created_by=self.user,
modified_by=self.user)
self.assertEqual(str(period), 'Sunday 9:00 am - 1:00 pm')
```
#### File: member_manager/tests/profile_skill.py
```python
from django.contrib.auth.models import User
from django.test import TestCase
from member_manager.models import Profile
from member_manager.models import ProfileSkill
from member_manager.models import Skill
class ProfileSkillTest(TestCase):
"""Test ProfileSkill model"""
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create(username='foo', email='<EMAIL>')
cls.profile = Profile.objects.create(first_name='foo', last_name='bar',
email='<EMAIL>',
user=cls.user,
created_by=cls.user,
modified_by=cls.user)
cls.skill = Skill.objects.create(label='Programming', value='pr',
created_by=cls.user,
modified_by=cls.user)
def test_str(self):
"""Test ProfileSkill string"""
a_profile = ProfileSkill.objects.create(profile=self.profile,
skill=self.skill,
created_by=self.user,
modified_by=self.user)
self.assertEqual(str(a_profile), 'foo bar - Programming')
```
#### File: member_manager/tests/skill.py
```python
from django.contrib.auth.models import User
from django.test import TestCase
from member_manager.models import Skill
class SkillTest(TestCase):
"""Test Skill model"""
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create(username='foo', email='<EMAIL>')
cls.skill = Skill.objects.create(label='Programming', value='pr',
created_by=cls.user,
modified_by=cls.user)
def test_str(self):
"""Test Skill string"""
self.assertEqual(str(self.skill), 'Programming')
``` |
{
"source": "4SH3N/Batch-Files",
"score": 3
} |
#### File: 4SH3N/Batch-Files/Find_and_Trace_IP.py
```python
import socket
import json
from urllib.request import urlopen
def main():
    # Resolve a hostname to an IP address; "Website" is a placeholder hostname.
    remote_host = "Website"
    try:
        print("IP Address Of " + remote_host + " Is " + socket.gethostbyname(remote_host))
    except socket.error as e:
        print("Error : {}".format(e))
def trace_ip():
    # Look up geolocation details for an IP address via the ip-api.com JSON API.
    ip = input("What is your target IP: ")
    url = "http://ip-api.com/json/"
    response = urlopen(url + ip)
    data = response.read()
    values = json.loads(data)
    print("IP: " + values['query'])
    print("City: " + values['city'])
    print("Isp: " + values['isp'])
    print("Country: " + values['country'])
    print("Region: " + values['region'])
    print("Time zone: " + values['timezone'])
if __name__ == "__main__":
    main()
    trace_ip()
``` |
{
"source": "4shadoww/yunomi",
"score": 3
} |
#### File: core/rules/abusefilters.py
```python
import datetime
from core.rule_core import *
from core import yapi as api
from core import timelib
class RuleModule:
name = "abusefilters"
cfg_ver = None
config = {
"filters": [11, 30, 34, 38, 55, 58, 98, 125, 133],
"hours": 1,
"rules": [
{
"hits": 1,
"expiry": 24,
"score": 1
},
{
"hits": 5,
"expiry": 24,
"score": 2
}
]
}
def run(self, rev):
score = 0
expiry = None
end = datetime.timedelta(hours=self.config["hours"])
time = timelib.to_string(datetime.datetime.utcnow()-end)
result = api.get_abuse_filter(rev["user"], time, self.config["filters"])
if "error" in result:
logger.error("abusefilters error: %s" % result["error"]["code"])
return score, expiry
for rule in self.config["rules"]:
if rule["hits"] <= len(result["query"]["abuselog"]):
if score < rule["score"]:
score = rule["score"]
expiry = rule["expiry"]
return score, expiry
```
#### File: core/rules/adminaction.py
```python
from core.rule_core import *
from core import yapi as api
import datetime
class RuleModule:
name = "adminaction"
cfg_ver = None
config = {
"months": 1,
"stabilizes": 2,
"list_path": "Käyttäjä:VakauttajaBot/Pitkäaikaista vakautusta vaativat sivut"
}
def add_to_list(self, rev):
text = api.get_text(self.config["list_path"])
newpage = "* [["+rev["title"]+"]]"
if(newpage in text):
return
text += "\n"+newpage
logger.info("adding page "+rev["title"]+" to stabilize list")
api.save_page(self.config["list_path"], text, (config_loader.dictionary[config_loader.cur_conf["core"]["lang"]]["editsum"]["LS"] % rev["title"]), minor=True)
def run(self, rev):
score = 0
expiry = None
leend = datetime.datetime.utcnow() - datetime.timedelta(days=self.config["months"] * 30, hours=0, minutes=0, seconds=0)
stable_log = api.get_stable_log(rev["title"], str(leend).split('.', 2)[0])
if(len(stable_log["query"]["logevents"]) >= self.config["stabilizes"]):
if(not config_loader.cur_conf["core"]["test"]):
self.add_to_list(rev)
return score, expiry
```
#### File: core/rules/anonreverts.py
```python
from core.rule_core import *
from core import op
from core import yapi as api
class RuleModule:
name = "anonreverts"
cfg_ver = None
config = [
{
"expiry": 24,
"hours": 1,
"reverts_required": 2,
"score": 1,
"groups": ["autoconfirmed"]
}
]
def run(self, rev):
for rule in self.config:
ip_reverts = 0
reverts = op.get_reverts(rev["title"], hours=rule["hours"])
if not reverts:
continue
if len(reverts) >= rule["reverts_required"]:
for revert in reverts:
if revert["reverter"] != revert["victim"]:
victim_groups = api.get_user_rights(revert["victim"])
reverter_groups = api.get_user_rights(revert["reverter"])
if not victim_groups or not reverter_groups:
continue
if all(i not in victim_groups for i in rule["groups"]) or all(i not in reverter_groups for i in rule["groups"]):
ip_reverts += 1
if ip_reverts >= rule["reverts_required"]:
return rule["score"], rule["expiry"]
return 0, None
``` |
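
The three rule modules above share one contract: `run(rev)` returns a `(score, expiry)` pair. The sketch below only illustrates how such modules could be combined by a dispatcher; it is not the actual yunomi rule core.

```python
def evaluate(rev, rule_modules):
    """Illustrative only: combine (score, expiry) results for one revision.

    Takes the highest score seen and the expiry that accompanied it,
    mirroring the per-rule behaviour inside the modules above.
    """
    best_score, best_expiry = 0, None
    for module in rule_modules:
        score, expiry = module.run(rev)
        if score > best_score:
            best_score, best_expiry = score, expiry
    return best_score, best_expiry


# Hypothetical wiring; the real core builds the rev dict from the wiki's
# recent-changes feed and instantiates the configured RuleModule classes.
# modules = [abusefilters.RuleModule(), anonreverts.RuleModule()]
# score, expiry = evaluate({"user": "ExampleUser", "title": "Example"}, modules)
```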
{
"source": "4shaw/django-imger",
"score": 2
} |
#### File: django-imger/imger/widgets.py
```python
from django import forms
from django.utils.safestring import mark_safe
from django.conf import settings
import json
class ImgerWidget(forms.Widget):
def __init__(self, attrs=None, **kwargs):
self.imger_settings = attrs['imger_settings']
super(ImgerWidget, self).__init__(**kwargs)
class Media:
js = (
'imger/js/jquery-1.11.1.min.js',
'imger/js/jquery.nouislider.js',
'imger/js/form2js.js',
'imger/js/canvas-to-blob.min.js',
'imger/js/imger-compress.js',
'imger/js/imger-ui.js',
'imger/js/imger-init.js'
)
css = {
'all': (
'imger/css/bootstrap.css',
'imger/css/bootstrap-theme.css',
'imger/css/imger.css',
'imger/css/jquery.nouislider.css'
)
}
def render(self, name, value, attrs=None):
imger_settings = self.imger_settings
imger_json = json.dumps(imger_settings)
static_url = settings.STATIC_URL
if value is None:
currently = r''
current_link = r'Nothing yet'
else:
currently = r'%s' % (value)
current_link = r'<a href="%s%s">%s</a>' % (
settings.MEDIA_URL,
value,
value
)
if not static_url.endswith('/'):
static_url = r'%s/' % (static_url)
return mark_safe("<p>Currently: %s<br/>Change: <span><button data-static_url=\"%s\" data-imger='%s' class=\"ImgerBrowseBTN\" type=\"button\">Browse</button> <span class=\"ImgerBrowseLabel\">No image selected...</span><input value=\"%s\" class=\"ImgerDataURL\" name=\"%s\" type=\"hidden\" /></span></p>" % (current_link, static_url, imger_json, currently, name))
``` |
{
"source": "4shub/tmuxer",
"score": 2
} |
#### File: 4shub/tmuxer/setup.py
```python
from setuptools import setup, find_packages
def read_file(fname):
with open(fname, 'r') as f:
return f.read()
setup(
name="tmuxer",
version='0.0.2',
author='<NAME>',
author_email='<EMAIL>',
description='Quick tool that creates tmux interfaces from a conf file',
long_description=read_file('./README.md'),
url='https://github.com/4shub/quicklinks/',
py_modules=['tmuxer'],
zip_safe=False,
install_requires=read_file('./dependencies.txt'),
license='MIT',
entry_points= {
"console_scripts": [
"tmuxer = tmuxer:tmuxer",
]
}
)
``` |
{
"source": "4shub/weExist",
"score": 2
} |
#### File: weExist/src/auth.py
```python
from __future__ import print_function
import httplib2
import os
import re
import time
import base64
from apiclient import discovery
from apiclient import errors
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
SCOPES = 'https://mail.google.com/'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Gmail API Python Quickstart'
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
def get_credentials():
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'gmail-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def main():
credentials = get_credentials()
if __name__ == '__main__':
main()
``` |
{
"source": "4SiB/togglore",
"score": 3
} |
#### File: togglore/togglore/config.py
```python
import configparser
import datetime
class Config(object):
def __init__(self, api_key=None, work_hours_per_day=8.4, excluded_days=[], user_id=1, workspace=1):
self.api_key = api_key
self.work_hours_per_day = work_hours_per_day
self.excluded_days = excluded_days
self.user_id = user_id
self.workspace = workspace
def write_to_file(self, path):
cfg = configparser.ConfigParser()
cfg['Authentication'] = {}
cfg['Authentication']['API_KEY'] = self.api_key
with open(path, 'w') as configfile:
cfg.write(configfile)
@classmethod
def read_from_file(cls, path):
cfg = configparser.ConfigParser()
cfg.read(path)
api_key = cfg['Authentication']['API_KEY']
work_hours = cfg['Work Hours']['hours_per_day']
excluded_days_string = cfg['Work Hours']['excluded_days']
user_id = cfg['User Info']['id']
workspace = cfg['User Info']['workspace']
day_strings = excluded_days_string.split(',')
days = []
for day_string in day_strings:
days.append(datetime.datetime.strptime(day_string, "%Y.%m.%d").date())
return cls(api_key=api_key, work_hours_per_day=float(work_hours), excluded_days=days, user_id=user_id,
workspace=workspace)
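
# For reference (layout inferred from read_from_file above; not part of the original
# file), the expected config file looks roughly like:
#
#   [Authentication]
#   API_KEY = <your toggl api token>
#
#   [Work Hours]
#   hours_per_day = 8.4
#   excluded_days = 2024.12.25,2025.01.01
#
#   [User Info]
#   id = 123456
#   workspace = 1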
```
#### File: togglore/togglore/utils.py
```python
import datetime
import calendar
def sum_time_of_entries(entries):
ms = 0
for entry in entries:
ms += entry['dur']
return ms / 3600000.0
class WorkTimeCalculator(object):
def __init__(self, work_hours_per_day=8.4, excluded_days=[]):
self.work_hours_per_day = work_hours_per_day
self.excluded_days = excluded_days
def count_workdays_in_range(self, date_range):
current = date_range.start
workdays = 0
while current <= date_range.end:
if current.isoweekday() not in [6, 7] and current not in self.excluded_days:
workdays += 1
current += datetime.timedelta(1)
return workdays
def time_to_work_in_range(self, date_range):
weekdays = self.count_workdays_in_range(date_range)
return weekdays * self.work_hours_per_day
class DateRange(object):
def __init__(self, start, end):
self.start = start
self.end = end
@classmethod
def since(cls, start):
start = datetime.datetime.strptime(start, "%Y.%m.%d").date()
end = datetime.date.today()
return cls(start, end)
@classmethod
def parse_from_iso_strings(cls, start, end):
start = datetime.datetime.strptime(start, "%Y.%m.%d").date()
end = datetime.datetime.strptime(end, "%Y.%m.%d").date()
return cls(start, end)
@classmethod
def today(cls):
return cls(datetime.date.today(), datetime.date.today())
@classmethod
def this_week(cls):
today = datetime.date.today()
start = today - datetime.timedelta(today.weekday())
end = start + datetime.timedelta(6)
return cls(start, end)
@classmethod
def this_month(cls):
today = datetime.date.today()
__, end_day = calendar.monthrange(today.year, today.month)
start = datetime.date(today.year, today.month, 1)
end = datetime.date(today.year, today.month, end_day)
return cls(start, end)
@classmethod
def this_year(cls):
today = datetime.date.today()
start = datetime.date(today.year, 1, 1)
end = datetime.date(today.year, 12, 31)
return cls(start, end)
@classmethod
def month(cls, month):
today = datetime.date.today()
__, end_day = calendar.monthrange(today.year, month)
start = datetime.date(today.year, month, 1)
end = datetime.date(today.year, month, end_day)
return cls(start, end)
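
# Illustrative usage (added sketch, not part of the original file): expected working
# hours for the current month, skipping weekends and any configured excluded days.
if __name__ == "__main__":
    calc = WorkTimeCalculator(work_hours_per_day=8.4, excluded_days=[])
    print(calc.time_to_work_in_range(DateRange.this_month()))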
``` |
{
"source": "4sOnJns/http_requests",
"score": 3
} |
#### File: 4sOnJns/http_requests/http_requests.py
```python
import os
import requests
import requests.exceptions
import json
repos = open("places.json")
repoData = json.load(repos)
alreadyRetrieved = []
def isRetrieved(place):
return place in alreadyRetrieved
for index, place in enumerate(repoData['places']):
print(">----------<")
if (place is None):
print(
f"Failed to get weather at {str(index)} because insufficient data was supplied.")
continue
if (isRetrieved(place)):
print(
f'Skipped fetching weather for {place} as it was already fetched.')
continue
print(f"Fetching weather: {place}")
try:
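        # format=j1 asks wttr.in for its JSON report instead of the default ANSI text output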
res = requests.get(f"https://wttr.in/{place}?format=j1", headers={"Accept": "application/json"})
res.raise_for_status()
responseData = json.loads(str("{\"weather\": " + res.text + "}"))
file = open(
f"weather/{place}.json", "w")
file.write(json.dumps(responseData, indent=2, sort_keys=True))
file.close()
alreadyRetrieved.append(place)
print(f"Weather for {place} was found!")
except requests.exceptions.HTTPError:
print(f'Failed to find place {place}')
except Exception as e:
print(f"Failed to find weather for {place}")
print("Reason:", end=" ")
print(e)
print(">----------<")
print('Cleaning Up...')
def noLongerRequired(place):
if (place not in repoData['places']):
os.remove(f"./weather/{place}.json")
print(f'{place} was removed!')
for place in os.listdir('weather'):
if (not place.endswith('.txt')):
noLongerRequired(os.path.splitext(place)[0])
``` |
{
"source": "4soos/data_structures",
"score": 4
} |
#### File: py/tree/binary_tree.py
```python
from typing import List
class TreeNode:
def __init__(self, val: int = 0, left = None, right = None) -> None:
self.val = val
self.left = left
self.right = right
class Solution:
"""
递归模板:
def order_recursion(self, root: TreeNode = None) -> List[int]:
res = []
def dfs(root):
if not root: return
res.append(root.val) # Position 1
dfs(root.left) # Position 2
dfs(root.right) # Position 3
dfs(root)
return res
preorder_recursion: P1 -> P2 -> P3
inorder_recursion : P2 -> P1 -> P3
postorder_recursion: P2 -> P3 -> P2
======================================================================
迭代模板:
"""
def preorder_traversal_recursion(self, root: TreeNode = None) -> List[int]:
res = []
def dfs(root):
if not root: return
res.append(root.val)
dfs(root.left)
dfs(root.right)
dfs(root)
return res
def preorder_traversal_iteration(self, root: TreeNode = None) -> List[int]:
if not root: return []
stack, res = [root], []
while stack:
node = stack.pop()
if node:
res.append(node.val)
if node.right:
stack.append(node.right)
if node.left:
stack.append(node.left)
return res
def inorder_traversal_recursion(self, root: TreeNode = None) -> List[int]:
res = []
def dfs(root):
if not root: return
dfs(root.left)
res.append(root.val)
dfs(root.right)
dfs(root)
return res
def level_order(self, root: TreeNode = None) -> List[List[int]]:
if not root: return []
queue, res = [root], []
while queue:
res.append([node.val for node in queue])
ll = []
for node in queue:
if node.left:
ll.append(node.left)
if node.right:
ll.append(node.right)
queue = ll
return res
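
    # Added sketch (not in the original file): the stack-based inorder version the
    # docstring's iterative-template section alludes to -- push the left spine, pop,
    # visit, then continue with the right subtree.
    def inorder_traversal_iteration(self, root: TreeNode = None) -> List[int]:
        stack, res = [], []
        node = root
        while node or stack:
            while node:              # descend the left spine
                stack.append(node)
                node = node.left
            node = stack.pop()       # leftmost unvisited node
            res.append(node.val)
            node = node.right        # continue with its right subtree
        return res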
``` |
{
"source": "4soos/leetcode_practices",
"score": 3
} |
#### File: Feb/16/min_stack.py
```python
import math
class min_stack:
def __init__(self) -> None:
self.stack = []
self.min_stack = [math.inf]
def push(self, x: int) -> None:
self.stack.append(x)
self.min_stack.append(min(x, self.min_stack[-1]))
def pop(self) -> None:
self.stack.pop()
self.min_stack.pop()
def top(self) -> int:
return self.stack[-1]
def get_min(self) -> int:
return self.min_stack[-1]
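
# Illustrative usage (added sketch, not part of the original file): every push also
# records the running minimum, so get_min() stays O(1) after any sequence of pops.
if __name__ == "__main__":
    s = min_stack()
    for x in (3, 1, 2):
        s.push(x)
    assert s.get_min() == 1  # 1 is the smallest value pushed so far
    s.pop()                  # removes 2
    s.pop()                  # removes 1
    assert s.get_min() == 3  # minimum falls back to 3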
``` |
{
"source": "4stropotato/pwned-by",
"score": 4
} |
#### File: versions/v0.20/client.py
```python
import socket
def ip_address():
global server_ip
serv_ip = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
serv_ip.connect(("8.8.8.8", 80))
server_ip = serv_ip.getsockname()[0]
serv_ip.close()
return server_ip
########################
server_ip = ip_address() # 8
server_port = 5555
########################
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # 9
# 10 since the server is now listening..
client.connect((server_ip,server_port)) # 11
confirmation = 'Connect!'.encode('utf-8') # 17
client.send(confirmation) # 18
########################
# 8 Should be the same ip and port as the server.py
# 9 creating an IPv4 (AF_INET) TCP (SOCK_STREAM) socket
# 11 initiate the 3-way handshake to server_ip and server_port; let's go back to server.py
# 17 convert 'Connect!' to bytes and assign it to confirmation
# 18 let's send the confirmation to the server. Let's go to server.py
########################
# pwning tmrw!
```
#### File: versions/v0.31s/server.py
```python
import socket
def ip_address():
global server_ip
serv_ip = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
serv_ip.connect(("8.8.8.8", 80))
server_ip = serv_ip.getsockname()[0]
serv_ip.close()
return server_ip
#########################
server_ip = ip_address()
server_port = 5555
#########################
# 11 receiving the message. Since the client sends the message twice, we receive twice as well: once for the header and once for the body (the actual message).
def message_recv(size): # 12
while True: # 13
header = client.recv(size).decode('utf-8')
if header: # 14
body_size = int(header) # 15
body = client.recv(body_size).decode('utf-8') # 16
break # 17
return body # 18
def server_conn():
global server,client,ip
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((server_ip,server_port))
server.listen(5)
##### DELETABLE #####
from pathlib import Path # PATHLIB IS INJECTED SO THAT WE HAVE TO EXECUTE 1 FILE ONLY
exec(Path('client.py').read_text()) # PATHLIB IS INJECTED SO THAT WE HAVE TO EXECUTE 1 FILE ONLY
##### DELETABLE #####
client, ip = server.accept()
server_conn()
# testing
print(message_recv(2))
print(message_recv(2))
print(message_recv(2))
#########################
# 12 new function: message_recv, which takes the header size as its argument
# 13 infinite loop until we confirm that the header has been received, so both sides stay in step
# 14 an empty header evaluates to False; once the header has a value the condition becomes True
# 15 the header arrives as padded bytes, e.g. b'5 '; converting it to int gives 5, which becomes the size of the body
# 16 the body is then received using the size announced by the header
# 17 after a successful exchange we break out of the loop
# 18 return the body's value
#########################
# pwning tmrw!
``` |
{
"source": "4surix/discord-py-slash-command",
"score": 3
} |
#### File: discord-py-slash-command/discord_slash/error.py
```python
class SlashCommandError(Exception):
"""
All exceptions of this extension can be captured with this.
Note that discord.py doesn't trigger `on_command_error` event.
"""
pass
class RequestFailure(SlashCommandError):
"""
Request to Discord API has failed.
:ivar status: Status code of failed response.
:ivar msg: Message of failed response.
"""
def __init__(self, status: int, msg: str):
self.status = status
self.msg = msg
super().__init__(f"Request failed with resp: {self.status} | {self.msg}")
class IncorrectFormat(SlashCommandError):
"""
Some formats are incorrect. See Discord API DOCS for proper format.
"""
def __init__(self, msg: str):
super().__init__(msg)
class DuplicateCommand(SlashCommandError):
"""
There is a duplicate command name.
"""
def __init__(self, name: str):
super().__init__(f"Duplicate command name detected: {name}")
``` |
{
"source": "4teamwork/django-utils",
"score": 2
} |
#### File: django_utils/views/file_getter.py
```python
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import Http404
from django.views.generic import View
from django_sendfile import sendfile
from django.contrib.contenttypes.models import ContentType
import base64
from urllib import parse
class FileGetterView(LoginRequiredMixin, View):
"""
This view will deliver all the files and check if the user is logged in. As the whole application is only
available privately, we also need to protect our user generated content (images etc.)
"""
http_method_names = ["get"]
def get(self, request, file_info):
obj, file = self.get_object_from_url(request, file_info)
if not obj:
raise Http404
return sendfile(request, file.path)
def get_object_from_url(self, request, url):
"""
Resolve objects based on urls provided by "get_url_for_object".
If any of this fails, a 404 should be raised in the view, but logging information for developers must be available.
"""
try:
(app, model, id, field, hash) = (
base64.urlsafe_b64decode(parse.unquote(url)).decode().split("|")
)
model = ContentType.objects.get(app_label=app, model=model)
obj = self.get_object(model.model_class(), id, request=request, field=field)
return obj, getattr(obj, field)
except Exception:
return None, None
def get_object(self, model_class, id, **kwargs):
return model_class._default_manager.get(id=id)
``` |
{
"source": "4teamwork/ftw.aare",
"score": 3
} |
#### File: ftw.aare/aare/aare.py
```python
import datetime
import itertools
import locale
import pdb
import click
import requests
LANGUAGE = "en"
session = requests.Session()
def _(s):
global LANGUAGE
de = {"Current temperature of the aare: {}° C": "Aktuelle Temperatur der Aare: {}° C",
"{}: min {}° C, max {}° C": "{}: min {}° C, max {}° C"}
if LANGUAGE == "de":
return de[s]
else:
return s
@click.command()
@click.option('--statistics', help='show the hottest and the coolest temperature of the last 7 days', is_flag=True)
@click.option('--language', default="", help='set the language of the output. Possible languages: "de" (German), '
'"en" (English). The output is displayed in the OS language by default, '
'it will also be displayed english if an invalid argument is passed.')
def aare(statistics, language):
"""A simple command which displays the current aare temperature"""
if language != "":
set_language(language)
else:
set_system_language()
if statistics:
display_stats()
else:
display_current_temp()
def set_language(lang):
global LANGUAGE
LANGUAGE = lang
def display_current_temp():
api_url = 'http://aare.schwumm.ch/aare.json'
response = session.get(api_url)
aare_json = response.json()
click.echo(_("Current temperature of the aare: {}° C").format(aare_json["temperature"]))
def display_stats():
api_url = 'http://aare.schwumm.ch/api/archive'
params = {"from": "now", "to": "-7 days"}
    # The website rejects unknown user agents, so we impersonate a regular web browser to gain access
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/50.0.2661.102 Safari/537.36'}
response = session.get(api_url, params=params, headers=headers)
response_json = response.json()
temperatures = response_json['data']['temperature']
dates = [datetime.datetime.strptime(item, '%Y-%m-%d %H:%M:%S') for item in response_json['data']['datetime']]
data = list(zip(dates, temperatures))
# Remove data points not having a temperature
data = list(filter(lambda item: item[1], data))
for key, group in itertools.groupby(data, lambda item: datetime.date(item[0].year, item[0].month, item[0].day)):
temperatures = [item[1] for item in tuple(group)]
min_temp = min(temperatures)
max_temp = max(temperatures)
day_string = key.strftime('%Y-%m-%d')
output = _("{}: min {}° C, max {}° C").format(day_string, min_temp, max_temp)
print(output)
def set_system_language():
locale_tuple = locale.getdefaultlocale()
lang = locale_tuple[0]
if lang == "de_DE" or lang == "de_CH":
set_language("de")
if __name__ == '__main__':
aare()
``` |
{
"source": "4the1appdevs/face_recognition",
"score": 3
} |
#### File: face_recognition/cam/recognizer.py
```python
from multiprocessing import Process
from cam.models import dlib_model, classifier_model
import pickle
import dlib
import cv2
import numpy as np
import time
def get_time():
localtime = time.localtime()
capturetime = time.strftime("%Y-%m-%d %H:%M:%S", localtime)
return capturetime
class FaceRecognition(Process):
def __init__(self, img_queue, rst_queue):
super().__init__()
self.img_queue = img_queue
self.rst_queue = rst_queue
def init(self):
# face detector
# face_detector = dlib.cnn_face_detection_model_v1(dlib_model.cnn_face_detector_model_location())
self.face_detector = dlib.get_frontal_face_detector()
# face recognition
self.sp = dlib.shape_predictor(dlib_model.pose_predictor_model_location())
self.face_rec = dlib.face_recognition_model_v1(dlib_model.face_recognition_model_location())
def run(self):
self.init()
with open(classifier_model.classifier_location(), 'rb') as classifier:
(model, name_lib, feature_lib) = pickle.load(classifier)
while True:
try:
frame = self.img_queue.get()
except:
pass
else:
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_dets = self.face_detector(frame_rgb, 0)
feature_lists=[]
if len(frame_dets) :
for i, det in enumerate(frame_dets):
if type(det) == dlib.rectangle:
shape = self.sp(frame_rgb, det)
else:
shape = self.sp(frame_rgb, det.rect)
face_descriptor = self.face_rec.compute_face_descriptor(frame_rgb, shape, 1)
feature_lists.append(face_descriptor)
if len(feature_lists):
predictions = model.predict(feature_lists)
namestr = ''
for idx, targ in enumerate(predictions):
distance = np.linalg.norm(np.array(feature_lists[idx]) - np.array(feature_lib[targ]))
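                            # 0.6 is the distance threshold commonly used with dlib's 128-d face
                            # embeddings: below it, the two faces are treated as the same person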
namestr += (name_lib[targ]+' ' if distance < 0.6 else 'unknown ' )
rst = get_time() + ":" + namestr
print(rst)
try:
self.rst_queue.put_nowait(namestr)
except:
self.rst_queue.get_nowait()
self.rst_queue.put_nowait(namestr)
```
#### File: face_recognition/cam/train.py
```python
import os
import dlib
import cv2
import pickle
from sklearn.svm import SVC
from cam.models import dlib_model, classifier_model
FaceDetection = dlib.get_frontal_face_detector()
# face recognition
sp = dlib.shape_predictor(dlib_model.pose_predictor_model_location())
Description = dlib.face_recognition_model_v1(dlib_model.face_recognition_model_location())
def check_parameter(param, param_type, create_new_if_missing=False):
assert param_type == 'file' or param_type == 'directory'
if param_type == 'file':
assert os.path.exists(param)
assert os.path.isfile(param)
else:
if create_new_if_missing is True:
if not os.path.exists(param):
os.makedirs(param)
else:
assert os.path.isdir(param)
else:
assert os.path.exists(param)
assert os.path.isdir(param)
def listdir(top_dir, type='image'):
tmp_file_lists = os.listdir(top_dir)
file_lists = []
if type == 'image':
for e in tmp_file_lists:
if e.endswith('.jpg') or e.endswith('.png') or e.endswith('.bmp') or e.endswith('.JPG'):
file_lists.append(e)
elif type == 'dir':
for e in tmp_file_lists:
if os.path.isdir(top_dir + e):
file_lists.append(e)
else:
raise Exception('Unknown type in listdir')
return file_lists
def imread(f):
return cv2.cvtColor(cv2.imread(f), cv2.COLOR_BGR2RGB)
def extract_feature(src_dir, type_name):
check_parameter(src_dir, 'directory')
if src_dir[-1] != '/':
src_dir += '/'
image_lists = listdir(src_dir, type_name)
names_lists= []
feature_lists=[]
labels =[]
for n,e in enumerate(image_lists):
img=imread(''.join([src_dir,e]))
dets=FaceDetection(img,1) #upsampling
if len(dets)==0:
print("The faces of {} >>>>>>>>>> detecting defeat".format(e))
else:
params=e.split(".")
name_id=params[0]
print("ID : {} , Name : {} ".format(n,name_id))
Best_face=max(dets, key=lambda rect: rect.width() * rect.height())
shape = sp(img, Best_face)
face_descriptor=Description.compute_face_descriptor(img, shape, 10)
feature_lists.append(face_descriptor)
names_lists.append(name_id)
labels.append(n)
print('Training classifier')
model = SVC(kernel='linear', probability=True)
model.fit(feature_lists, labels)
# Saving classifier model
with open(classifier_model.classifier_location(), 'wb') as outfile:
pickle.dump((model, names_lists, feature_lists), outfile)
print('Saved classifier model to ', classifier_model.classifier_location())
def run():
print("wait for system init...")
extract_feature('./images','image')
print("training finished.")
if __name__ == '__main__':
run()
``` |
{
"source": "4theKnowledge/s-vaal",
"score": 2
} |
#### File: s-vaal/src/main.py
```python
import yaml
import random
import numpy as np
import math
import json
import torch
import torch.utils.data as data
from train import Trainer
from sampler import Sampler
from connections import load_config
class ActiveLearner(Trainer, Sampler):
def __init__(self):
Trainer.__init__(self)
self.initial_budget_frac = 0.10 # fraction of samples that AL starts with
self.val_frac = 0.05
self.test_frac = 0.05
self.budget_frac = 0.10
self.data_splits_frac = np.round(np.linspace(self.budget_frac, 1, num=10, endpoint=True), 1)
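        # e.g. with budget_frac = 0.10 this yields [0.1, 0.2, ..., 1.0]: each AL round
        # grows the labelled pool by another 10% of the training data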
self.batch_size=64
def _init_al_dataset(self):
""" Initialises dataset for active learning
"""
self._init_dataset()
train_dataset = self.datasets['train']
dataset_size = len(train_dataset)
self.budget = math.ceil(self.budget_frac*dataset_size)
Sampler.__init__(self, config, self.budget) # TODO: Weird place to initialise this
all_indices = set(np.arange(dataset_size))
k_initial = math.ceil(len(all_indices)*self.initial_budget_frac)
initial_indices = random.sample(list(all_indices), k=k_initial)
sampler_init = data.sampler.SubsetRandomSampler(initial_indices) # need to sample from training dataset
self.labelled_dataloader = data.DataLoader(train_dataset, sampler=sampler_init, batch_size=self.batch_size, drop_last=True)
self.val_dataloader = data.DataLoader(self.datasets['valid'], batch_size=self.batch_size, drop_last=False)
self.test_dataloader = data.DataLoader(self.datasets['test'], batch_size=self.batch_size, drop_last=False)
return all_indices, initial_indices
def learn(self):
""" Performs the active learning cycle """
metrics_hist = dict()
max_runs = 3
for run in range(max_runs):
all_indices, initial_indices = self._init_al_dataset()
metrics_hist[str(run)] = dict()
current_indices = list(initial_indices)
for split in self.data_splits_frac:
print(f'\nRUN {run} - SPLIT - {split*100:0.0f}%')
# Initialise models
self._init_models(mode='svaal')
# Do some label stuff
unlabelled_indices = np.setdiff1d(list(all_indices), current_indices)
unlabelled_sampler = data.sampler.SubsetRandomSampler(unlabelled_indices)
unlabelled_dataloader = data.DataLoader(self.datasets['train'],
sampler=unlabelled_sampler,
batch_size=64,
drop_last=False)
print(f'Labelled: {len(current_indices)} Unlabelled: {len(unlabelled_indices)} Total: {len(all_indices)}')
# TODO: Make the SVAAL allow 100% labelled and 0% unlabelled to pass through it. Breaking out of loop for now when data hits 100% labelled.
if len(unlabelled_indices) == 0:
break
metrics, svae, discriminator = self.train(dataloader_l=self.labelled_dataloader,
dataloader_u=unlabelled_dataloader,
dataloader_v=self.val_dataloader,
dataloader_t=self.test_dataloader,
mode='svaal')
print(f'Test Eval.: F1 Scores - Macro {metrics[0]*100:0.2f}% Micro {metrics[1]*100:0.2f}%')
# Record performance at each split
metrics_hist[str(run)][str(split)] = metrics
sampled_indices = self.sample_adversarial(svae, discriminator, unlabelled_dataloader, indices=unlabelled_indices, cuda=True) # TODO: review usage of indices arg
current_indices = list(current_indices) + list(sampled_indices)
sampler = data.sampler.SubsetRandomSampler(current_indices)
self.labelled_dataloader = data.DataLoader(self.datasets['train'], sampler=sampler, batch_size=self.batch_size, drop_last=True)
# write results to disk
with open('results.json', 'w') as fj:
json.dump(metrics_hist, fj, indent=4)
def main(config):
al = ActiveLearner()
al.learn()
if __name__ == '__main__':
# Seeds
config = load_config()
np.random.seed(config['Utils']['seed'])
torch.manual_seed(config['Utils']['seed'])
    main(config)
```
#### File: src/tests/_tests_sampler.py
```python
import unittest

import torch
from torch import Tensor

# NOTE: import path assumed from the src/ layout used elsewhere in this repo
from sampler import Sampler


class Tests(unittest.TestCase):
def _sim_model(self, data: Tensor) -> Tensor:
""" Simulated model for generating uncertainity scores. Intention
is to be a placeholder until real models are used and for testing."""
return torch.rand(size=(data.shape[0],))
def setUp(self):
# Init class
self.sampler = Sampler(budget=10)
# Init random tensor
self.data = torch.rand(size=(10,2,2)) # dim (batch, length, features)
# Params
self.budget = 18
# All sample tests are tested for:
# 1. dims (_, length, features) for input and output Tensors
# 2. batch size == sample size
def test_sample_random(self):
self.assertEqual(self.sampler.sample_random(self.data).shape[1:], self.data.shape[1:])
self.assertEqual(self.sampler.sample_random(self.data).shape[0], self.sampler.budget)
def test_sample_least_confidence(self):
self.assertEqual(self.sampler.sample_least_confidence(model=self.sampler._sim_model, data=self.data).shape[1:], self.data.shape[1:])
self.assertEqual(self.sampler.sample_least_confidence(model=self.sampler._sim_model, data=self.data).shape[0], self.sampler.budget)
# def test_sample_bayesian(self):
# self.assertEqual(self.sampler.sample_bayesian(model=self.sampler._sim_model, no_models=3, data=self.data).shape[1:], self.data.shape[1:])
# self.assertEqual(self.sampler.sample_bayesian(model=self.sampler._sim_model, no_models=3, data=self.data).shape[0], self.sampler.budget)
# def test_adversarial_sample(self):
# self.assertEqual(self.sampler.sample_adversarial(self.data).shape[1:], self.data.shape[1:])
# self.assertEqual(self.sampler.sample_adversarial(self.data).shape[0], self.sampler.budget)
```
#### File: src/tests/test_utils.py
```python
import unittest

import torch
from torch.utils.data import DataLoader

# NOTE: import paths assumed from the src/ layout used elsewhere in this repo
from data import SequenceDataset
from utils import split_data, get_lengths


class Trainer():
"""
"""
def __init__(self):
pass
def _init_dataset_gen(self):
""" Initialises dataset for model training """
# Currently will be using generated data, but in the future will be real.
self.train_dataset_l = SequenceDataset(self.config, no_sequences=8, max_sequence_length=self.max_sequence_length, task_type=self.task_type)
self.train_dataloader_l = DataLoader(self.train_dataset_l, batch_size=2, shuffle=True, num_workers=0)
self.train_dataset_u = SequenceDataset(self.config, no_sequences=16, max_sequence_length=self.max_sequence_length, task_type=self.task_type)
self.train_dataloader_u = DataLoader(self.train_dataset_u, batch_size=2, shuffle=True, num_workers=0)
# Concatenate sequences in X_l and X_u to build vocabulary for downstream
self.vocab = self.build_vocab(sequences = torch.cat((self.train_dataset_l.sequences, self.train_dataset_u.sequences)))
self.vocab_size = len(self.vocab)
print('---- DATA SUCCESSFULLY INITIALISED ----')
class Tests(unittest.TestCase):
def setUp(self):
self.tensor_shape = (100,10,20)
self.sequences = torch.stack([torch.randint(0,10,size=(10,)) for _ in range(self.tensor_shape[0])])
self.split_2 = (0.2,0.8)
self.split_3 = (0.1,0.1,0.8)
self.rand_tensor = torch.randint(0,10,size=self.tensor_shape)
def test_data_split(self):
ds1, ds2 = split_data(dataset=self.rand_tensor, splits=self.split_2)
self.assertEqual(len(ds1), self.tensor_shape[0]*self.split_2[0])
self.assertEqual(len(ds2), self.tensor_shape[0]*self.split_2[1])
ds1, ds2, ds3 = split_data(dataset=self.rand_tensor, splits=self.split_3)
self.assertEqual(len(ds1), self.tensor_shape[0]*self.split_3[0])
self.assertEqual(len(ds2), self.tensor_shape[0]*self.split_3[1])
self.assertEqual(len(ds3), self.tensor_shape[0]*self.split_3[2])
def test_get_lengths(self):
self.assertEqual(len(get_lengths(self.sequences)), self.tensor_shape[0])
```
#### File: s-vaal/src/utils.py
```python
import yaml
import csv
import json
from itertools import groupby
import itertools
import re
import math
from datetime import date, datetime
import os
import sys, traceback
import unittest
from nltk.tokenize import word_tokenize
from collections import defaultdict
import io
from connections import load_config
import torch
Tensor = torch.Tensor
class DataPreparation:
""" Utility functions for preparing sequence labelling datasets """
def __init__(self):
config = load_config()
self.utils_config = config['Utils']
self.task_type = self.utils_config['task_type']
self.data_name = self.utils_config[self.task_type]['data_name']
self.min_occurence = self.utils_config[self.task_type]['min_occurence']
self.special_tokens = self.utils_config['special_token2idx']
self.date = date.today().strftime('%d-%m-%Y')
self.max_seq_len = self.utils_config[self.task_type]['max_sequence_length']
self.x_y_pair_name = 'seq_label_pairs' if self.task_type == 'CLF' else 'seq_tags_pairs' # Key in dataset - semantically correct for the task at hand.
self.pad_token = '<PAD>'
self.sos_token = '<START>'
self.eos_token = '<STOP>'
print(f'{datetime.now()}: Building {self.data_name.upper()} data for {self.task_type.upper()} task')
if self.task_type == 'SEQ':
self._load_data()
self._process_data_ner()
self._process_pretrain_data_ner()
elif self.task_type=='CLF':
self._load_data()
self._process_data_clf()
else:
raise ValueError
def _read_txt(self, path):
f = open(path, 'r')
data = f.readlines()
if self.task_type == 'SEQ':
if self.data_name == 'conll2003':
# CoNLL-2003 (NER)
# remove DOCSTART (this is specific to conll2003 original formatting)
data = [line for line in data if 'DOCSTART' not in line]
            if self.data_name in ('ontonotes-5.0', 'bbn'):
data = [line for line in data]
f.close()
elif self.task_type == 'CLF':
# Currently no CLF data that needs text processing
pass
return data
def _read_csv(self, path):
""" Reads data in .CSV format
Arguments
---------
path : str
Path to .csv file location
Returns
-------
data : list
List of tuples corresponding to Xn, y pairs/triples
"""
data = list()
with open(path, 'r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
# data = list()
corpus_str = ''
for row in csv_reader:
if self.data_name == 'ag_news':
# ag_news has col 1 - label, col 2 - headline, col 3 - article text
# and has no column headers. For this dataset we concatenate the headline to the article text
corpus_str += f'{row[1] + " " +row[2]}\t{row[0]}\n'
data = [line for line in corpus_str.split('\n') if line] # if statement gets rid of empty strings
return data
def _load_data(self):
""" Loads data for each split and combines into a dictionary for downstream tasks """
if self.utils_config[self.task_type]['data_split']:
self.dataset = dict()
for split in self.utils_config[self.task_type]['data_split']:
self.dataset[split] = dict()
# Read text documents
                if self.data_name in ('conll2003', 'ontonotes-5.0', 'bbn'):
self.dataset[split]['corpus'] = self._read_txt(os.path.join(self.utils_config[self.task_type]['data_root_path'], f'{split}.txt'))
elif self.data_name == 'ag_news':
self.dataset[split]['corpus'] = self._read_csv(os.path.join(self.utils_config[self.task_type]['data_root_path'], f'{split}.csv'))
else:
raise ValueError
else:
# No splits, single corpora -> need to split into test-train-valid (TODO: future work)
pass
def _process_data_clf(self):
pass
# # Trim and pad sequences
# self._trim_sequences(split=split)
# self._add_special_tokens(split=split)
# self._pad_sequences(split=split)
# if split == 'train':
# print('Building vocabularies and mappings from training data')
# self._build_vocabs()
# self._word2idx()
# self._idx2word()
# self._tag2idx()
# self._idx2tag()
# self._save_vocabs() # do this after word2idx etc as it allows us to save them into the same json as vocabs
# self.convert_sequences(split=split)
# # Save results (add datetime and counts)
# self._save_json(path=os.path.join(self.utils_config[self.task_type]['data_root_path'], f'data.json'), data=self.dataset)
def _process_data_ner(self):
""" Controller for processing named entity recognition (sequence) data """
for split, data in self.dataset.items():
# Convert corpora into key-value pairs of sequences-tags
# Need to seperate words and tags before trimming and padding
# a tad of duplication, but all good.
self._prepare_sequences(split=split, data=data)
# Trim and pad sequences
self._trim_sequences(split=split)
self._add_special_tokens(split=split)
self._pad_sequences(split=split)
if split == 'train':
print('Building vocabularies and mappings from training data')
self._build_vocabs()
self._word2idx()
self._idx2word()
self._tag2idx()
self._idx2tag()
self._save_vocabs() # do this after word2idx etc as it allows us to save them into the same json as vocabs
self.convert_sequences(split=split)
# Save results (add datetime and counts)
self._save_json(path=os.path.join(self.utils_config[self.task_type]['data_root_path'], f'data.json'), data=self.dataset)
def _process_pretrain_data_ner(self):
data_out = defaultdict(dict)
print(f'{datetime.now()}: Generating pretraining datasets')
for split, data in self.dataset.items():
data = data['corpus']
assert self.data_name == 'conll2003' # only developed for conll2003 atm
docs = [list(group) for k, group in groupby(data, lambda x: len(x) == 1) if not k]
if split == 'train':
w2c = dict()
w2i = dict()
i2w = dict()
for st, idx in self.utils_config['special_token2idx'].items():
i2w[idx] = st
w2i[st] = idx
for idx, doc in enumerate(docs):
# conll2003 needs to be split on tab before tokenization
doc = " ".join([token.split()[0] for token in doc])
# words = word_tokenize(doc)
words = doc.split()
# Trim based on sequence length
# This should be -1 but as it needs to be the same size as the normal vocab, it's -2
words = words[:self.max_seq_len-2] # -2 for SOS, EOS tags
for word in words:
if word in w2c.keys():
w2c[word] += 1
else:
w2c[word] = 1
for w, c in w2c.items():
if c > self.min_occurence and w not in self.utils_config['special_token2idx'].keys():
i2w[len(w2i)] = w
w2i[w] = len(w2i)
assert len(w2i) == len(i2w)
print(f'{datetime.now()}: Vocab of {len(w2i)} keys created')
vocab = {"word2idx": w2i, "idx2word": i2w}
with io.open(os.path.join(self.utils_config[self.task_type]['data_root_path'], 'pretrain', 'vocab.json'), 'w') as vocab_file:
json.dump(vocab, vocab_file, ensure_ascii=False, indent=4)
# vocab_file.write(data.encode('utf8', 'replace'))
data_out[split] = dict()
for idx, doc in enumerate(docs):
id = len(data_out[split])
data_out[split][id] = dict()
doc = " ".join([token.split()[0] for token in doc])
# words = word_tokenize(doc)
words = doc.split()
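                # next-token prediction pairs: input = <START> + words, target = words + <STOP>,
                # i.e. the target is the input shifted left by one position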
input = ['<START>'] + words
input = input[:self.max_seq_len]
target = words[:self.max_seq_len-1]
target = target + ['<STOP>']
assert len(input) == len(target)
length = len(input)
input.extend(['<PAD>'] * (self.max_seq_len-length))
target.extend(['<PAD>'] * (self.max_seq_len-length))
input = [w2i.get(w, w2i['<UNK>']) for w in input]
target = [w2i.get(w, w2i['<UNK>']) for w in target]
data_out[split][id]['input'] = input
data_out[split][id]['target'] = target
data_out[split][id]['length'] = length
with io.open(os.path.join(self.utils_config[self.task_type]['data_root_path'], 'pretrain', 'data.json'), 'w') as data_file:
json.dump(data_out, data_file, ensure_ascii=False, indent=4)
def _prepare_sequences(self, split : str, data):
""" Converts corpus into sequence-tag tuples.
Notes
-----
- Currently works for NER (CoNLL-2003) and CLF (AG NEWS)
- Extend for POS
"""
print(f'{datetime.now()}: Preparing sequences')
corpus = data['corpus']
        if self.data_name in ('conll2003', 'ontonotes-5.0', 'bbn'):
docs = [list(group) for k, group in groupby(corpus, lambda x: len(x) == 1) if not k]
elif self.data_name == 'ag_news':
docs = corpus
self.dataset[split][self.x_y_pair_name] = list()
data = dict()
# Split docs into sequences and tags
doc_count = 0
delimiter = '\t' if self.data_name == 'ag_news' else ' '
for doc in docs:
try:
                if self.data_name in ('conll2003', 'ontonotes-5.0', 'bbn'):
sequence = [token.split(delimiter)[0] for token in doc]
tags = [token.split(delimiter)[-1] for token in doc]
tags = [tag.replace('\n','') for tag in tags]
# print(tags)
data[doc_count] = (sequence, tags)
elif self.data_name == 'ag_news':
sequence = doc.split(delimiter)[0].split() # split seq from seq-tag string and then split on white space for naive tokenization
tag = [doc.split(delimiter)[1]]
data[doc_count] = (sequence, tag)
doc_count += 1
except:
print(f'Unable to process document: {doc}')
traceback.print_exc(file=sys.stdout)
self.dataset[split][self.x_y_pair_name] = data
def _build_vocabs(self, split='train'):
""" Builds vocabularies off of words and tags. These are built from training data so out of vocabulary tokens
will be marked as <UNK> when being converted into numerical vectors. """
# Get list of words in corpus
word_list = list(itertools.chain.from_iterable([doc.split() for doc in [" ".join(seq) for seq, tag in self.dataset[split][self.x_y_pair_name].values()]]))
print(f'Total number of tokens in training corpus: {len(word_list)}')
# Remove special_tokens (these are added explicitly later)
word_list = [word for word in word_list if word not in list(self.special_tokens.keys())]
# print(word_list)
word_freqs = dict()
# Get word frequencies
for word in word_list:
if not word_freqs.get(word, False):
word_freqs[word] = 1
else:
word_freqs[word] += 1
# print(word_freqs)
# Get set of frequent words over minimum occurence
word_list_keep = list()
word_list_notkeep = list()
for word, freq in word_freqs.items():
if self.min_occurence < freq:
# keep word
word_list_keep.append(word)
else:
word_list_notkeep.append(word+'\n')
if self.min_occurence > 0:
with open(os.path.join(self.utils_config[self.task_type]['data_root_path'], 'nonvocab_words.txt'), 'w') as fw:
fw.writelines(word_list_notkeep)
print(f'Word list sizes - Original: {len(word_freqs.keys())} - Trimmed: {len(word_list_keep)}')
# Build word and tag vocabularies
# comprehensions are a bit nasty... but effective!
self.vocab_words = word_list_keep
tag_list = list(itertools.chain.from_iterable([doc.split() for doc in [" ".join(tag) for seq, tag in self.dataset[split][self.x_y_pair_name].values()]]))
# Remove special_tokens (these are added explicitly later)
tag_list = [tag for tag in tag_list if tag not in list(self.special_tokens.keys())]
self.vocab_tags = list(set(tag_list))
# Add special_tokens to vocabs
self.vocab_words = list(self.special_tokens.keys()) + self.vocab_words
self.vocab_tags = list(self.special_tokens.keys()) + self.vocab_tags
print(f'Size of vocabularies - Word: {len(self.vocab_words)} Tag: {len(self.vocab_tags)}')
def _save_vocabs(self):
# Save vocabularies to disk
vocabs = {'words': self.vocab_words, 'tags': self.vocab_tags, 'word2idx': self.word2idx, 'idx2word': self.idx2word}
self._save_json(path=os.path.join(self.utils_config[self.task_type]['data_root_path'], 'vocabs.json'),data=vocabs)
def _word2idx(self):
""" Built off of training set - out of vocab tokens are <UNK>"""
self.word2idx = {word:idx for idx, word in enumerate(self.vocab_words)}
# add special tokens to mapping
self.word2idx = {**self.special_tokens, **self.word2idx, }
def _idx2word(self):
""" Built off of training set - out of vocab tokens are <UNK> """
self.idx2word = {idx:word for word, idx in self.word2idx.items()}
def _tag2idx(self):
""" Built off of training set - out of vocab tokens are <UNK>"""
self.tag2idx = {tag:idx for idx, tag in enumerate(self.vocab_tags)}
def _idx2tag(self):
""" Built off of training set - out of vocab tokens are <UNK> """
self.idx2tag = {idx:tag for tag, idx in self.tag2idx.items()}
def _trim_sequences(self, split: str):
""" Trims sequences to the maximum allowable length """
for idx, pair in self.dataset[split][self.x_y_pair_name].items():
seq, tags = pair # tag for CLF, tags for SEQ
self.dataset[split][self.x_y_pair_name][idx] = (seq[:self.max_seq_len-2], tags[:self.max_seq_len-2]) # -2 for SOS, EOS tags
def _pad_sequences(self, split: str):
""" Pads sequences up to the maximum allowable length """
for idx, pair in self.dataset[split][self.x_y_pair_name].items():
seq, tags = pair
if len(seq) < self.max_seq_len:
# probably a better way to do this, but comprehension is easy. TODO: fix dodgy code!
seq = seq + [self.pad_token for _ in range(self.max_seq_len - len(seq))]
if self.task_type == 'SEQ':
tags = tags + [self.pad_token for _ in range(self.max_seq_len - len(tags))]
self.dataset[split][self.x_y_pair_name][idx] = (seq, tags)
else:
# Leave tag alone
self.dataset[split][self.x_y_pair_name][idx] = (seq, tags)
def _add_special_tokens(self, split: str):
""" Adds special tokens such as <EOS>, <SOS> onto sequences """
for idx, pair in self.dataset[split][self.x_y_pair_name].items():
seq, tags = pair
seq = [self.sos_token] + seq + [self.eos_token]
if self.task_type == 'SEQ':
tags = [self.sos_token] + tags + [self.eos_token]
self.dataset[split][self.x_y_pair_name][idx] = (seq, tags)
else:
# Leave tag alone
self.dataset[split][self.x_y_pair_name][idx] = (seq, tags)
def convert_sequences(self, split: str):
"""
Converts sequences of tokens and tags to their indexed forms for each split in the dataset
Note: any word in the sequence that isn't in the vocab will be replaced with <UNK>
TODO: investigate how this might impact the SVAE word_dropout methodology """
# If word or tag is not in the sequence, change with <UNK>
# unsure if the output tags need to be changed? I assume not as the output tags are known. TODO: verify logic.
self.dataset[split][f'{self.x_y_pair_name}_enc'] = dict() # enc -> integer encoded pairs
for idx, pair in self.dataset[split][self.x_y_pair_name].items():
seq, tags = pair
# Sequences
seq_enc = [self.word2idx.get(word, self.word2idx['<UNK>']) for word in seq]
# Tags
tags_enc = [self.tag2idx.get(tag, self.word2idx['<UNK>']) for tag in tags]
# print(tags)
# print(tags_enc)
self.dataset[split][f'{self.x_y_pair_name}_enc'][idx] = (seq_enc, tags_enc)
def normalise(self):
pass
def _save_json(self, path: str, data: dict):
with open(path, 'w') as outfile:
json.dump(data, outfile, indent=4)
# Misc functions below
def trim_padded_seqs(batch_lengths: Tensor, batch_sequences: Tensor, pad_idx: int) -> Tensor:
""" Takes a batch of sequences and trims similar to pack padded sequence method
Arguments
---------
batch_lengths : Tensor
Batch of sequence lengths
batch_tags : Tensor
Batch of sequences
pad_idx : Int
Integer mapped to padding special token
Returns
-------
batch_tags : Tensor
Sorted and trimmed batch of sequence tags
"""
# Get max length of longest sequence in batch so it can be used to filter tags
sorted_lengths, _ = torch.sort(batch_lengths, descending=True) # longest seq is at index 0
longest_seq = sorted_lengths[0].data.cpu().numpy()
longest_seq_len = longest_seq[longest_seq != pad_idx][0] # remove padding
# Strip off as much padding as possible similar to (variable length sequences via pack padded methods)
batch_sequences = torch.stack([tags[:longest_seq_len] for tags in batch_sequences])
assert batch_sequences.is_cuda
return batch_sequences
def to_var(x: Tensor) -> Tensor:
""" Converts object to variable mounted on GPU """
if torch.cuda.is_available():
x = x.cuda()
return x
def load_json(path: str) -> dict:
""" Loads JSON file from disk
Arguments
---------
path : str
Path to JSON file on disk
Returns
-------
data : dict
Dictionary of JSON file
"""
with open(path, 'r') as jsonfile:
data = json.load(jsonfile)
return data
def get_lengths(sequences: Tensor) -> Tensor:
""" Calculates lengths of sequences
Arguments
---------
sequences : Tensor
Set of sequences.
Returns
-------
lengths : Tensor
Set of sequence lengths
"""
lengths = torch.tensor([len(sequence) for sequence in sequences])
return lengths
def split_data(dataset: Tensor, splits: tuple) -> Tensor:
""" Partitions data into different sets
Arguments
---------
dataset : Tensor
Tensor of data.
splits : tuple
Tuple of floats indicating ordered splits
Returns
-------
dataset : Tensor
Ordered set of dataset subset objects corresponding to splits
Notes
-----
random_split can have its generator fixed to be deterministic for reproducible results.
"""
assert sum(list(splits)) == 1.0
if len(splits) == 2:
split_dataset = torch.utils.data.random_split(dataset=dataset, lengths=[int(math.floor(len(dataset)*splits[0])),
int(math.ceil(len(dataset)*splits[1]))])
return split_dataset[0], split_dataset[1]
elif len(splits) == 3:
# TODO: figure out how to ensure that the three splits have the same total samples as the input dataset...
split_dataset = torch.utils.data.random_split(dataset=dataset, lengths=[int(len(dataset)*splits[0]),
int(len(dataset)*splits[1]),
int(len(dataset)*splits[2])])
return split_dataset[0], split_dataset[1], split_dataset[2]
else:
raise ValueError
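
# Illustrative usage (added note, not part of the original file):
#   train_ds, valid_ds = split_data(dataset=torch.randint(0, 10, (100, 10)), splits=(0.8, 0.2))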
def prepare_for_embedding():
""" Prepares sequences for Flair embedding """
from flair.data import Sentence
text = 'Hello my name is <NAME>!'
sentence_e = Sentence(text)
print(embeddings.embed(sentence_e))
def main():
DataPreparation()
if __name__ == '__main__':
# Seeds
main()
```
#### File: s-vaal/src/vae_pretrainer.py
```python
from datetime import datetime
import os
from tqdm import tqdm
import math
import random
import numpy as np
import sys, traceback
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.utils.data as data
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
# torch.cuda.empty_cache()
from models import SVAE, Discriminator
from data import RealDataset
from utils import load_json, trim_padded_seqs
from connections import load_config
from sampler import Sampler
class ModularTrainer(Sampler):
def __init__(self):
self.config = load_config()
self.model_config = self.config['Models']
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Model
self.task_type = self.config['Utils']['task_type']
self.max_sequence_length = self.config['Utils'][self.task_type]['max_sequence_length']
# Real data
self.data_name = self.config['Utils'][self.task_type]['data_name']
self.data_splits = self.config['Utils'][self.task_type]['data_split']
self.pad_idx = self.config['Utils']['special_token2idx']['<PAD>']
# Test run properties
self.epochs = self.config['Train']['epochs']
self.svae_iterations = self.config['Train']['svae_iterations']
self.kfold_xval = False
def _init_data(self, batch_size=None):
if batch_size is None:
batch_size = self.config['Train']['batch_size']
# Load pre-processed data
path_data = os.path.join('/home/tyler/Desktop/Repos/s-vaal/data', self.task_type, self.data_name, 'pretrain', 'data.json')
path_vocab = os.path.join('/home/tyler/Desktop/Repos/s-vaal/data', self.task_type, self.data_name, 'pretrain', 'vocab.json') # not vocabs
data = load_json(path_data)
self.vocab = load_json(path_vocab) # Required for decoding sequences for interpretations. TODO: Find suitable location... or leave be...
self.vocab_size = len(self.vocab['word2idx'])
self.idx2word = self.vocab['idx2word']
self.word2idx = self.vocab['word2idx']
self.datasets = dict()
if self.kfold_xval:
# Perform k-fold cross-validation
# Join all datasets and then randomly assign train/val/test
print('hello')
for split in self.data_splits:
print(data[split][self.x_y_pair_name])
else:
for split in self.data_splits:
# Access data
split_data = data[split]
# print(split_data)
# Convert lists of encoded sequences into tensors and stack into one large tensor
split_inputs = torch.stack([torch.tensor(value['input']) for key, value in split_data.items()])
split_targets = torch.stack([torch.tensor(value['target']) for key, value in split_data.items()])
# Create torch dataset from tensors
split_dataset = RealDataset(sequences=split_inputs, tags=split_targets)
# Add to dictionary
self.datasets[split] = split_dataset #split_dataloader
# Create torch dataloader generator from dataset
if split == 'test':
self.test_dataloader = DataLoader(dataset=split_dataset, batch_size=batch_size, shuffle=True, num_workers=0)
if split == 'valid':
self.val_dataloader = DataLoader(dataset=split_dataset, batch_size=batch_size, shuffle=True, num_workers=0)
                if split == 'train':
self.train_dataloader = DataLoader(dataset=split_dataset, batch_size=batch_size, shuffle=True, num_workers=0)
        print(f'{datetime.now()}: Data loaded successfully')
def _init_svae_model(self):
self.svae = SVAE(**self.model_config['SVAE']['Parameters'],vocab_size=self.vocab_size).to(self.device)
self.svae_optim = optim.Adam(self.svae.parameters(), lr=self.model_config['SVAE']['learning_rate'])
self.svae.train()
print(f'{datetime.now()}: Initialised SVAE successfully')
def interpolate(self, start, end, steps):
interpolation = np.zeros((start.shape[0], steps+2))
for dim, (s, e) in enumerate(zip(start, end)):
interpolation[dim] = np.linspace(s, e, steps+2)
return interpolation.T
def _idx2word_inf(self, idx, i2w, pad_idx):
# inf-erence
sent_str = [str()]*len(idx)
for i, sent in enumerate(idx):
for word_id in sent:
if word_id == pad_idx:
break
sent_str[i] += i2w[str(word_id.item())] + " "
sent_str[i] = sent_str[i].strip()
return sent_str
def _pretrain_svae(self):
self._init_data()
self._init_svae_model()
tb_writer = SummaryWriter(comment=f"pretrain svae {self.data_name}", filename_suffix=f"pretrain svae {self.data_name}")
print(f'{datetime.now()}: Training started')
step = 0
for epoch in range(1, self.config['Train']['epochs']+1, 1):
for batch_inputs, batch_lengths, batch_targets in self.train_dataloader:
if torch.cuda.is_available():
batch_inputs = batch_inputs.to(self.device)
batch_lengths = batch_lengths.to(self.device)
batch_targets = batch_targets.to(self.device)
batch_size = batch_inputs.size(0)
logp, mean, logv, _ = self.svae(batch_inputs, batch_lengths, pretrain=False)
NLL_loss, KL_loss, KL_weight = self.svae.loss_fn(logp=logp,
target=batch_targets,
length=batch_lengths,
mean=mean,
logv=logv,
anneal_fn=self.model_config['SVAE']['anneal_function'],
step=step,
k=self.model_config['SVAE']['k'],
x0=self.model_config['SVAE']['x0'])
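                # standard annealed VAE objective: reconstruction NLL plus KL, with the KL term
                # ramped up by KL_weight (the anneal schedule) to mitigate posterior collapse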
svae_loss = (NLL_loss + KL_weight * KL_loss) / batch_size
self.svae_optim.zero_grad()
svae_loss.backward()
self.svae_optim.step()
tb_writer.add_scalar('Loss/train/KLL', KL_loss, step)
tb_writer.add_scalar('Loss/train/NLL', NLL_loss, step)
tb_writer.add_scalar('Loss/train/Total', svae_loss, step)
tb_writer.add_scalar('Utils/train/KL_weight', KL_weight, step)
# Increment step after each batch of data
step += 1
if epoch % 1 == 0:
print(f'{datetime.now()}: Epoch {epoch} Loss {svae_loss:0.2f} Step {step}')
if epoch % 5 == 0:
# Perform inference
self.svae.eval()
try:
samples, z = self.svae.inference(n=2)
print(*self._idx2word_inf(samples, i2w=self.idx2word, pad_idx=self.config['Utils']['special_token2idx']['<PAD>']), sep='\n')
except:
traceback.print_exc(file=sys.stdout)
self.svae.train()
# Save final model
save_path = os.getcwd() + '/best models/svae.pt'
torch.save(self.svae.state_dict(), save_path)
print(f'{datetime.now()}: Model saved')
print(f'{datetime.now()}: Training finished')
if __name__ == '__main__':
mt = ModularTrainer()
mt._pretrain_svae()
# mt._init_data()
# for inputs, lengths, targets in mt.datasets['test']: #test_dataloader
# print(inputs)
# print(targets)
# print(lengths)
# break
``` |
{
"source": "4thel00z/subdomain-takeover-scraper",
"score": 2
} |
#### File: subdomain-takeover-scraper/app/app.py
```python
import asyncio
import functools
import socket
import typing
from itertools import product
from string import ascii_lowercase, digits
import aiohttp
import async_timeout
from static.subdomains import subdomains
from static.tlds import tlds
SUCCESSFUL_MAPPED_HOSTS = {}
SUBPROCESS_COUNT = 0
SUBPROCESS_MAX_COUNT = 50
WAIT_INTERVALL = 0.1
WORDLIST_URL = "https://github.com/dwyl/english-words/blob/master/words.txt?raw=true"
ENUMERATOR_QUEUE = []
class Mode:
HOST_ENUMERATION = 0
def possible_hosts(length):
for host in (''.join(i) for i in product(ascii_lowercase + digits + "-", repeat=length)):
yield host
def handle_connection_error(url):
print("{url} could not be retireved".format(url=url))
# FIXME: add a mantainer task for deferred retries
ENUMERATOR_QUEUE.append(url)
async def fetch(session, url, *, loop):
with async_timeout.timeout(10, loop=loop):
try:
async with session.get(url) as response:
return await response.text()
except aiohttp.client_exceptions.ClientConnectorError:
handle_connection_error(url)
async def wordlist(loop):
global WORDLIST_URL
    async with aiohttp.ClientSession(loop=loop) as session:
content = await fetch(session, WORDLIST_URL, loop=loop)
return content
def possible_domain(subdomain, host, tld):
return ".".join([subdomain, host, tld])
async def print_current_map():
print(SUCCESSFUL_MAPPED_HOSTS)
async def _enumerate_hosts(*, loop):
for subdomain in subdomains:
for tld in tlds:
            for host in (await wordlist(loop)).splitlines():
domain = possible_domain(subdomain=subdomain, host=host, tld=tld)
print("processing {domain}..".format(domain=domain))
await asyncio.ensure_future(execute(domain, loop=loop))
print("processed {domain}...".format(domain=domain))
async def resolve(host, *, loop):
try:
return socket.gethostbyname(host)
except:
return None
async def execute(host, *, loop):
try:
ip = await resolve(host, loop=loop)
await asyncio.ensure_future(print_current_map(), loop=loop)
if ip is not None and ip.strip() != "":
return_code, is_possible_target = await asyncio.ensure_future(can_be_taken_over(host=host, loop=loop))
SUCCESSFUL_MAPPED_HOSTS[host] = {"ip": ip, "takeover": return_code == 0 and is_possible_target,
"claimed": True}
else:
SUCCESSFUL_MAPPED_HOSTS[host] = {"ip": ip, "takeover": False, "claimed": False}
# print("{payload} : {ip}".format(payload=host_name, ip=ip))
except socket.gaierror:
pass
class DigProtocol(asyncio.SubprocessProtocol):
FD_NAMES = ['stdin', 'stdout', 'stderr']
def __init__(self, done_future):
self.done = done_future
self.buffer = bytearray()
super().__init__()
def connection_made(self, transport):
# print('process started {}'.format(transport.get_pid()))
self.transport = transport
def pipe_data_received(self, fd, data):
# print('read {} bytes from {}'.format(len(data), self.FD_NAMES[fd]))
if fd == 1:
self.buffer.extend(data)
def process_exited(self):
# print('process exited')
return_code = self.transport.get_returncode()
# print('return code {}'.format(return_code))
if not return_code:
cmd_output = bytes(self.buffer).decode("utf-8")
results = self._parse_results(cmd_output)
else:
results = []
self.done.set_result((return_code, results))
def _parse_results(self, output: typing.Optional[str]) -> bool:
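        # a SERVFAIL answer to the NS lookup is treated as the takeover signal here: the name
        # still has a delegation, but it no longer resolves (dangling NS / unclaimed service)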
if not output or (isinstance(output, str) and output.strip() == ""):
return False
return "status: SERVFAIL" in output
async def can_be_taken_over(host, *, loop):
global SUBPROCESS_COUNT, SUBPROCESS_MAX_COUNT
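    # crude concurrency throttle: wait until the number of in-flight dig subprocesses
    # drops below SUBPROCESS_MAX_COUNT before spawning another one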
while SUBPROCESS_COUNT >= SUBPROCESS_MAX_COUNT:
await asyncio.sleep(WAIT_INTERVALL, loop=loop)
SUBPROCESS_COUNT += 1
cmd_done = asyncio.Future(loop=loop)
factory = functools.partial(DigProtocol, cmd_done)
proc = loop.subprocess_exec(
factory,
'dig', 'NS {host}'.format(host=host),
stdin=None,
stderr=None,
)
transport = None
try:
transport, protocol = await proc
await cmd_done
finally:
if transport is not None:
transport.close()
SUBPROCESS_COUNT -= 1
return cmd_done.result()
def run(args, mode):
if mode == Mode.HOST_ENUMERATION:
loop = asyncio.get_event_loop()
loop.run_until_complete(_enumerate_hosts(loop=loop))
# TODO:
# Replace all calls to print with logger calls (use swag)
#
#
#
``` |
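A minimal driver sketch for the enumerator above. The `enumerator` module name and the candidate hosts are assumptions, and `dig` must be on the PATH for the takeover check to run; the sketch just resolves a few hosts with `execute()` and prints the collected map.
```python
# Hypothetical driver for the module above; `enumerator` is an assumed module name.
import asyncio

import enumerator  # the file shown above, saved under this (assumed) name

async def scan(hosts, *, loop):
    # One execute() task per candidate host, all awaited together.
    await asyncio.gather(*(enumerator.execute(host, loop=loop) for host in hosts))

if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    loop.run_until_complete(scan(["example.com", "www.example.org"], loop=loop))
    print(enumerator.SUCCESSFUL_MAPPED_HOSTS)
```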
{
"source": "4tif4nwer/image-super-resolution",
"score": 3
} |
#### File: datasets/div2k/loader.py
```python
import os
import tensorflow as tf
import glob
from tensorflow.python.data.experimental import AUTOTUNE
hr_train_url = "http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_train_HR.zip"
hr_valid_url = "http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_valid_HR.zip"
def download_data(download_url, data_directory):
file = download_url.split("/")[-1]
print(data_directory)
tf.keras.utils.get_file(file, download_url, cache_subdir=data_directory, extract=True)
os.remove(os.path.join(data_directory, file))
def image_dataset_from_directory_or_url(data_directory, image_directory, download_url):
images_path = os.path.join(data_directory, image_directory)
if not os.path.exists(images_path):
print("Couldn't find directory: ", images_path)
os.makedirs(data_directory, exist_ok=True)
download_data(download_url, data_directory)
filenames = sorted(glob.glob(images_path + "/*.png"))
dataset = tf.data.Dataset.from_tensor_slices(filenames)
dataset = dataset.map(tf.io.read_file)
dataset = dataset.map(lambda x: tf.image.decode_png(x, channels=3), num_parallel_calls=AUTOTUNE)
cache_directory = os.path.join(data_directory, "cache", image_directory)
os.makedirs(cache_directory, exist_ok=True)
cache_file = cache_directory + "/cache"
dataset = dataset.cache(cache_file)
if not os.path.exists(cache_file + ".index"):
populate_cache(dataset, cache_file)
return dataset
def create_training_dataset(dataset_parameters, train_mappings, batch_size):
lr_dataset = image_dataset_from_directory_or_url(dataset_parameters.save_data_directory, dataset_parameters.train_directory, dataset_parameters.train_url)
hr_dataset = image_dataset_from_directory_or_url(dataset_parameters.save_data_directory, "DIV2K_train_HR", hr_train_url)
dataset = tf.data.Dataset.zip((lr_dataset, hr_dataset))
for mapping in train_mappings:
dataset = dataset.map(mapping, num_parallel_calls=AUTOTUNE)
dataset = dataset.batch(batch_size)
dataset = dataset.repeat()
dataset = dataset.prefetch(buffer_size=AUTOTUNE)
return dataset
def create_validation_dataset(dataset_parameters):
lr_dataset = image_dataset_from_directory_or_url(dataset_parameters.save_data_directory, dataset_parameters.valid_directory, dataset_parameters.valid_url)
hr_dataset = image_dataset_from_directory_or_url(dataset_parameters.save_data_directory, "DIV2K_valid_HR", hr_valid_url)
dataset = tf.data.Dataset.zip((lr_dataset, hr_dataset))
dataset = dataset.batch(1)
dataset = dataset.repeat(1)
dataset = dataset.prefetch(buffer_size=AUTOTUNE)
return dataset
def create_training_and_validation_datasets(dataset_parameters, train_mappings, train_batch_size=16):
training_dataset = create_training_dataset(dataset_parameters, train_mappings, train_batch_size)
validation_dataset = create_validation_dataset(dataset_parameters)
return training_dataset, validation_dataset
def populate_cache(dataset, cache_file):
print(f'Begin caching in {cache_file}.')
for _ in dataset: pass
print(f'Completed caching in {cache_file}.')
```
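A usage sketch for the loader above. The real `dataset_parameters` object comes from the repo's configuration; here a namedtuple stands in for it, and the LR directory/URL values are only plausible DIV2K defaults, not taken from the repo. A train mapping receives an `(lr, hr)` pair because the two datasets are zipped.
```python
# Sketch only: `loader` is an assumed module name and the params below mimic the
# repo's dataset configuration rather than reproducing it.
from collections import namedtuple

import tensorflow as tf

import loader  # the module shown above

DatasetParameters = namedtuple(
    "DatasetParameters",
    ["save_data_directory", "train_directory", "train_url",
     "valid_directory", "valid_url"])

params = DatasetParameters(
    save_data_directory="data",
    train_directory="DIV2K_train_LR_bicubic/X4",
    train_url="http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_train_LR_bicubic_X4.zip",
    valid_directory="DIV2K_valid_LR_bicubic/X4",
    valid_url="http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_valid_LR_bicubic_X4.zip")

def random_crop(lr, hr, lr_crop_size=24, scale=4):
    # Crop aligned LR/HR patches so every batch element has the same shape.
    lr_shape = tf.shape(lr)[:2]
    lr_top = tf.random.uniform((), maxval=lr_shape[0] - lr_crop_size + 1, dtype=tf.int32)
    lr_left = tf.random.uniform((), maxval=lr_shape[1] - lr_crop_size + 1, dtype=tf.int32)
    hr_top, hr_left = lr_top * scale, lr_left * scale
    lr_crop = lr[lr_top:lr_top + lr_crop_size, lr_left:lr_left + lr_crop_size]
    hr_crop = hr[hr_top:hr_top + lr_crop_size * scale, hr_left:hr_left + lr_crop_size * scale]
    return lr_crop, hr_crop

train_ds, valid_ds = loader.create_training_and_validation_datasets(
    params, train_mappings=[random_crop], train_batch_size=16)
```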
#### File: image-super-resolution/models/srresnet.py
```python
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Add, Lambda
from tensorflow.python.keras.layers import PReLU
from utils.normalization import normalize_01, denormalize_m11
upsamples_per_scale = {
2: 1,
4: 2,
8: 3
}
pretrained_srresnet_models = {
"srresnet_bicubic_x4": {
"url": "https://image-super-resolution-weights.s3.af-south-1.amazonaws.com/srresnet_bicubic_x4/generator.h5",
"scale": 4
}
}
def pixel_shuffle(scale):
return lambda x: tf.nn.depth_to_space(x, scale)
def upsample(x_in, num_filters):
x = Conv2D(num_filters, kernel_size=3, padding='same')(x_in)
x = Lambda(pixel_shuffle(scale=2))(x)
return PReLU(shared_axes=[1, 2])(x)
def residual_block(block_input, num_filters, momentum=0.8):
x = Conv2D(num_filters, kernel_size=3, padding='same')(block_input)
x = BatchNormalization(momentum=momentum)(x)
x = PReLU(shared_axes=[1, 2])(x)
x = Conv2D(num_filters, kernel_size=3, padding='same')(x)
x = BatchNormalization(momentum=momentum)(x)
x = Add()([block_input, x])
return x
def build_srresnet(scale=4, num_filters=64, num_res_blocks=16):
if scale not in upsamples_per_scale:
raise ValueError(f"available scales are: {upsamples_per_scale.keys()}")
num_upsamples = upsamples_per_scale[scale]
lr = Input(shape=(None, None, 3))
x = Lambda(normalize_01)(lr)
x = Conv2D(num_filters, kernel_size=9, padding='same')(x)
x = x_1 = PReLU(shared_axes=[1, 2])(x)
for _ in range(num_res_blocks):
x = residual_block(x, num_filters)
x = Conv2D(num_filters, kernel_size=3, padding='same')(x)
x = BatchNormalization()(x)
x = Add()([x_1, x])
for _ in range(num_upsamples):
x = upsample(x, num_filters * 4)
x = Conv2D(3, kernel_size=9, padding='same', activation='tanh')(x)
sr = Lambda(denormalize_m11)(x)
return Model(lr, sr)
```
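With the definitions above in scope, a quick smoke-test sketch: build the 4x generator and push a random low-resolution batch through it; the spatial dimensions should grow by a factor of 4.
```python
import numpy as np

model = build_srresnet(scale=4)
lr_batch = np.random.randint(0, 256, size=(1, 24, 24, 3)).astype('float32')
sr_batch = model.predict(lr_batch)
print(sr_batch.shape)  # expected (1, 96, 96, 3): two pixel-shuffle x2 upsamples
```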
#### File: image-super-resolution/utils/normalization.py
```python
def normalize_m11(x):
"""Normalizes RGB images to [-1, 1]."""
return x / 127.5 - 1
def normalize_01(x):
return x / 255.0
def denormalize_m11(x):
return (x + 1) * 127.5
``` |
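A tiny sanity-check sketch for the helpers above: `denormalize_m11` inverts `normalize_m11`, and `normalize_01` maps 8-bit values into [0, 1].
```python
import numpy as np

x = np.array([0.0, 127.5, 255.0])
assert np.allclose(denormalize_m11(normalize_m11(x)), x)
assert np.allclose(normalize_01(x), [0.0, 0.5, 1.0])
```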
{
"source": "4tikhonov/covid-19-infrastructure",
"score": 2
} |
#### File: api/app/main.py
```python
from fastapi import FastAPI
from covid19dh import covid19, cite
from config import covid19datahub_goal, covid19datahub_authors
import json
app = FastAPI()
@app.get("/")
def read_root():
return {"CoronaWhy Data API": "v0.1"}
#@app.get("/items/{item_id}")
#def read_item(item_id: int, q: str = None):
# return {"item_id": item_id, "q": q}
@app.get("/country/{item_id}")
# http://api.apps.coronawhy.org/data/country/FRA
def data_item(item_id: str, q: str = None):
jsondataset = covid19(item_id, verbose = False)
data = {}
datapoints = json.loads(jsondataset.to_json())
data['authors'] = str(covid19datahub_authors)
data['goal'] = str(covid19datahub_goal)
data['data'] = datapoints
data['citations'] = cite(jsondataset)
#return json.dumps(data, sort_keys=True, indent=4)
return data
@app.get("/data_by_pid/{item_id}")
def data_persistent(item_id: str, q: str = None):
return {"PID": pid, "q": q}
``` |
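A local usage sketch (assumptions: the file above is importable as `main` and the `covid19dh` package is installed). FastAPI's `TestClient` exercises the routes without starting uvicorn; the `/country/FRA` call downloads data through `covid19dh` on first use.
```python
from fastapi.testclient import TestClient

from main import app  # assumed import path for the module above

client = TestClient(app)
print(client.get("/").json())              # {'CoronaWhy Data API': 'v0.1'}
response = client.get("/country/FRA")      # fetches the dataset via covid19dh
print(sorted(response.json().keys()))      # ['authors', 'citations', 'data', 'goal']
```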
{
"source": "4tronix/PiconPlus",
"score": 3
} |
#### File: PiconPlus/Python/piconplus.py
```python
from __future__ import print_function
import smbus, time
bus = smbus.SMBus(1) # For revision 1 Raspberry Pi, change to bus = smbus.SMBus(0)
pzaddr = 0x26 # I2C address of Picon Zero
#---------------------------------------------
# Definitions of Commands to Picon Zero
MOTORA = 0
OUTCFG0 = 4
OUTPUT0 = 7
INCFG0 = 11
SETBRIGHT = 15
UPDATENOW = 16
RESET = 20
#---------------------------------------------
#---------------------------------------------
# General Constants
RETRIES = 10 # max number of retries for I2C calls
#---------------------------------------------
#---------------------------------------------
class PiconPlus:
#---------------------------------------------
# Get Version and Revision info
def getRevision(self):
for i in range(RETRIES):
try:
rval = bus.read_word_data (self.i2cAddress, 0)
return [rval>>8, rval%256]
except:
if (self.debug):
print("Error in getRevision(), retrying")
return [-1, -1]
#---------------------------------------------
#---------------------------------------------
# motor must be in range 0..3
# value must be in range -100 - +100
    # values of -100 and +100 are treated as always ON, no PWM
    def setMotor (self, motor, value):
        if (motor>=0 and motor<=3 and value>=-100 and value<=100):
for i in range(RETRIES):
try:
bus.write_byte_data (self.i2cAddress, motor, value)
break
except:
if (self.debug):
print("Error in setMotor(), retrying")
def forward (self, speed):
self.setMotor (0, speed)
self.setMotor (1, speed)
def reverse (self, speed):
self.setMotor (0, -speed)
self.setMotor (1, -speed)
def spinLeft (self, speed):
self.setMotor (0, -speed)
self.setMotor (1, speed)
def spinRight (self, speed):
self.setMotor (0, speed)
self.setMotor (1, -speed)
def stop(self):
self.setMotor (0, 0)
self.setMotor (1, 0)
#---------------------------------------------
#---------------------------------------------
# Read data for selected input channel (analog or digital)
# Channel is in range 0 to 5
def readInput (self, channel):
if (channel>=0 and channel <=5):
for i in range(RETRIES):
try:
return bus.read_word_data (self.i2cAddress, channel + 1)
except:
if (self.debug):
print("Error in readChannel(), retrying")
return -1
#---------------------------------------------
#---------------------------------------------
# Set configuration of selected output channel
# 0: On/Off, 1: PWM, 2: Servo
def setOutputConfig (self, output, value):
if (output>=0 and output<=2 and value>=0 and value<=2):
for i in range(RETRIES):
try:
bus.write_byte_data (self.i2cAddress, OUTCFG0 + output, value)
break
except:
if (self.debug):
print("Error in setOutputConfig(), retrying")
#---------------------------------------------
#---------------------------------------------
# Set configuration of selected input channel
# 0: Digital, 1: Analog, 2: DS18B20
def setInputConfig (self, channel, value, pullup = False):
if (channel >= 0 and channel <= 3 and value >= 0 and value <= 2):
if (value == 0 and pullup == True):
value = 128
for i in range(RETRIES):
try:
bus.write_byte_data (self.i2cAddress, INCFG0 + channel, value)
break
except:
if (self.debug):
print("Error in setInputConfig(), retrying")
#---------------------------------------------
#---------------------------------------------
# Set output data for selected output channel
# Mode Name Type Values
# 0 On/Off Byte 0 is OFF, 1 is ON
# 1 PWM Byte 0 to 100 percentage of ON time
# 2 Servo Byte -100 to + 100 Position in degrees
def setOutput (self, channel, value):
if (channel>=0 and channel<=2):
for i in range(RETRIES):
try:
bus.write_byte_data (self.i2cAddress, OUTPUT0 + channel, value)
break
except:
if (self.debug):
print("Error in setOutput(), retrying")
#---------------------------------------------
#---------------------------------------------
# Set the colour of an individual pixel (always output 5)
def setPixel (self, Pixel, Red, Green, Blue, Update=True):
pixelData = [Pixel, Red, Green, Blue]
for i in range(RETRIES):
try:
bus.write_i2c_block_data (self.i2cAddress, Update, pixelData)
break
except:
if (self.debug):
print("Error in setPixel(), retrying")
# Set the colour of all pixels
def setAllPixels (self, Red, Green, Blue, Update=True):
pixelData = [100, Red, Green, Blue]
for i in range(RETRIES):
try:
bus.write_i2c_block_data (self.i2cAddress, Update, pixelData)
break
except:
if (self.debug):
print("Error in setAllPixels(), retrying")
# Update the LEDs from the data buffer
def updatePixels (self):
for i in range(RETRIES):
try:
bus.write_byte_data (self.i2cAddress, UPDATENOW, 0)
break
except:
if (self.debug):
print("Error in updatePixels(), retrying")
# Set the overall brightness of pixel array
def setBrightness (self, brightness):
for i in range(RETRIES):
try:
bus.write_byte_data (self.i2cAddress, SETBRIGHT, brightness)
break
except:
if (self.debug):
print("Error in setBrightness(), retrying")
#---------------------------------------------
#---------------------------------------------
# Initialise the Board (same as cleanup)
def __init__ (self, i2cAddress, debug=False):
self.i2cAddress = i2cAddress
self.debug = debug
for i in range(RETRIES):
try:
bus.write_byte_data (self.i2cAddress, RESET, 0)
break
except:
if (self.debug):
print("Error in init(), retrying")
        time.sleep(0.01)  # 10ms delay to allow time to complete
if (self.debug):
print("Debug is", self.debug)
#---------------------------------------------
#---------------------------------------------
# Cleanup the Board (same as init)
def cleanup (self):
for i in range(RETRIES):
try:
bus.write_byte_data (self.i2cAddress, RESET, 0)
break
except:
if (self.debug):
print("Error in cleanup(), retrying")
time.sleep(0.001) # 1ms delay to allow time to complete
#---------------------------------------------
``` |
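A usage sketch based on the class above (it assumes a Picon board answering on I2C address 0x26): drive both motors briefly, read an analog input and light the pixel output.
```python
import time

pp = PiconPlus(0x26, debug=True)
print("firmware revision:", pp.getRevision())
pp.setInputConfig(0, 1)      # channel 0 as analog input
pp.forward(80)               # both motors forward at 80%
time.sleep(1)
pp.stop()
print("analog channel 0:", pp.readInput(0))
pp.setAllPixels(0, 64, 0)    # dim green on the pixel output
pp.cleanup()
```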
{
"source": "4U360/ABSympla",
"score": 3
} |
#### File: core/types/participant.py
```python
from decimal import Decimal
from collections import namedtuple
from typing import Iterator
CustomForm = namedtuple('CustomForm', ["id", "name", "value"])
class Participant(object):
__data = {}
def __init__(self, **kwargs):
self.__data = {**kwargs}
@property
def data(self) -> dict:
return self.__data
@property
def id(self) -> int:
return self.data.get("id")
@property
def order_id(self) -> str:
return self.data.get("order_id")
@property
def first_name(self) -> str:
return self.data.get("first_name", "")
@property
def last_name(self) -> str:
return self.data.get("last_name", "")
@property
def email(self) -> str:
return self.data.get("email", "")
@property
def ticket_number(self) -> str:
return self.data.get("ticket_number", "")
@property
def ticket_num_qr_code(self) -> str:
return self.data.get("ticket_num_qr_code", "")
@property
def ticket_name(self) -> str:
return self.data.get("ticket_name", "")
@property
def pdv_user(self) -> str:
return self.data.get("pdv_user", "")
@property
def ticket_sale_price(self) -> float:
return self.data.get("ticket_sale_price", 0)
@property
def ticket_sale_price_decimal(self) -> Decimal:
return Decimal(self.ticket_sale_price)
@property
def checkin(self) -> dict:
return self.data.get("checkin", {})
@property
def custom_form(self) -> Iterator[CustomForm]:
for form in self.data.get("custom_form", []):
yield CustomForm(**form)
``` |
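A usage sketch for the wrapper above; the payload dict is illustrative only, standing in for one participant record decoded from the Sympla API response.
```python
payload = {
    "id": 1,
    "order_id": "ABC123",
    "first_name": "Ana",
    "last_name": "Silva",
    "email": "ana@example.com",
    "ticket_num_qr_code": "QR-001",
    "ticket_sale_price": 49.9,
    "custom_form": [{"id": 10, "name": "Empresa", "value": "4U360"}],
}

participant = Participant(**payload)
print(participant.first_name, participant.ticket_sale_price_decimal)
for field in participant.custom_form:
    print(field.name, "=", field.value)
```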
{
"source": "4U360/Aki-CNPJ",
"score": 3
} |
#### File: akicnpj/downloader/empresa.py
```python
from .base import AkiDownloader
from ..settings import ROOT, Path
from os import makedirs
class AkiEmpresaDownloader(AkiDownloader):
def download(self, path: Path = ROOT.parent.joinpath("data"), ignore_exists: bool = False):
makedirs(str(path), exist_ok=True)
for file in self.files:
if file.name.endswith("EMPRECSV.zip"):
full_path = path.joinpath(file.name)
if not ignore_exists and full_path.exists():
self.logger.info(f"File {full_path} already exists, skipping...")
yield full_path
continue
self.logger.info(f"Downloading {full_path} [{file.size_str}]")
with open(full_path, "wb") as handler:
for chunk in self.get_file_content(file):
handler.write(chunk)
```
#### File: object/table/simples.py
```python
from ..base import AkiObject
from ...data.simples import Simples
from typing import NamedTuple
class AkiSimples(AkiObject):
__tuple: Simples = None
def __init__(self, simples_tuple: NamedTuple):
self.__original = simples_tuple
cnpj_basico = getattr(simples_tuple, "cnpj_basico", "0")
opcao_simples = str(getattr(simples_tuple, "opcao_simples", ""))
data_opcao_simples = str(getattr(simples_tuple, "data_opcao_simples", ""))
data_exclusao_simple = str(getattr(simples_tuple, "data_exclusao_simple", ""))
opcao_mei = str(getattr(simples_tuple, "opcao_mei", ""))
data_opcao_mei = str(getattr(simples_tuple, "data_opcao_mei", ""))
        data_exclusao_mei = str(getattr(simples_tuple, "data_exclusao_mei", ""))
self.__tuple = Simples(
index=int(cnpj_basico),
cnpj_basico=str(cnpj_basico).ljust(14, "X"),
opcao_simples=opcao_simples,
data_opcao_simples=data_opcao_simples,
data_exclusao_simple=data_exclusao_simple,
opcao_mei=opcao_mei,
data_opcao_mei=data_opcao_mei,
data_exclusao_mei=data_exclusao_mei
)
@property
def tuple(self) -> Simples:
return self.__tuple
``` |
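A usage sketch for `AkiSimples` above: the class normally wraps a row parsed from the Receita Federal "Simples" file; here a namedtuple fakes one record (the field values are made up) just to show the mapping into the `Simples` tuple.
```python
from collections import namedtuple

Row = namedtuple("Row", ["cnpj_basico", "opcao_simples", "data_opcao_simples",
                         "data_exclusao_simple", "opcao_mei", "data_opcao_mei",
                         "data_exclusao_mei"])

row = Row(cnpj_basico="12345678", opcao_simples="S", data_opcao_simples="20200101",
          data_exclusao_simple="", opcao_mei="N", data_opcao_mei="",
          data_exclusao_mei="")

simples = AkiSimples(row)
print(simples.tuple.index, simples.tuple.cnpj_basico)  # 12345678 12345678XXXXXX
```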
{
"source": "4uiiurz1/kaggle-tgs-salt-identification-challenge",
"score": 3
} |
#### File: 4uiiurz1/kaggle-tgs-salt-identification-challenge/resnet.py
```python
import torch.nn as nn
from torchvision.models import resnet
import torch.utils.model_zoo as model_zoo
class ResNet(resnet.ResNet):
def __init__(self, block, layers, first_stride=1, num_classes=1000):
super().__init__(block, layers, num_classes)
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=first_stride, padding=3,
bias=False)
def resnet18(pretrained=False, first_stride=1, **kwargs):
model = ResNet(resnet.BasicBlock, [2, 2, 2, 2], first_stride, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(resnet.model_urls['resnet18']))
return model
def resnet34(pretrained=False, first_stride=1, **kwargs):
model = ResNet(resnet.BasicBlock, [3, 4, 6, 3], first_stride, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(resnet.model_urls['resnet34']))
return model
def resnet50(pretrained=False, first_stride=1, **kwargs):
model = ResNet(resnet.Bottleneck, [3, 4, 6, 3], first_stride, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(resnet.model_urls['resnet50']))
return model
def resnet101(pretrained=False, first_stride=1, **kwargs):
model = ResNet(resnet.Bottleneck, [3, 4, 23, 3], first_stride, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(resnet.model_urls['resnet101']))
return model
def resnet152(pretrained=False, first_stride=1, **kwargs):
model = ResNet(resnet.Bottleneck, [3, 8, 36, 3], first_stride, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(resnet.model_urls['resnet152']))
return model
```
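A usage sketch for the variant above: `first_stride=1` keeps the stem at full resolution (useful for the small 101x101 TGS tiles) at the cost of extra compute, and the ImageNet weights still load because the `conv1` weight shape does not depend on the stride.
```python
import torch

encoder = resnet34(pretrained=False, first_stride=1)
stem = encoder.conv1(torch.randn(2, 3, 128, 128))
print(stem.shape)  # torch.Size([2, 64, 128, 128]) with stride 1, vs 64x64 with stride 2
```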
#### File: 4uiiurz1/kaggle-tgs-salt-identification-challenge/senet.py
```python
from collections import OrderedDict
import torch.nn as nn
from pretrainedmodels.models import senet
class SENet(senet.SENet):
def __init__(self, block, layers, groups, reduction, first_stride=1, dropout_p=0.2,
inplanes=128, input_3x3=True, downsample_kernel_size=3,
downsample_padding=1, num_classes=1000):
super().__init__(block, layers, groups, reduction, dropout_p,
inplanes, input_3x3, downsample_kernel_size,
downsample_padding, num_classes)
if input_3x3:
layer0_modules = [
('conv1', nn.Conv2d(3, 64, 3, stride=first_stride, padding=1,
bias=False)),
('bn1', nn.BatchNorm2d(64)),
('relu1', nn.ReLU(inplace=True)),
('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,
bias=False)),
('bn2', nn.BatchNorm2d(64)),
('relu2', nn.ReLU(inplace=True)),
('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,
bias=False)),
('bn3', nn.BatchNorm2d(inplanes)),
('relu3', nn.ReLU(inplace=True)),
]
else:
layer0_modules = [
('conv1', nn.Conv2d(3, inplanes, kernel_size=7, stride=first_stride,
padding=3, bias=False)),
('bn1', nn.BatchNorm2d(inplanes)),
('relu1', nn.ReLU(inplace=True)),
]
layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2,
ceil_mode=True)))
self.layer0 = nn.Sequential(OrderedDict(layer0_modules))
def senet154(num_classes=1000, pretrained='imagenet', first_stride=1):
model = SENet(senet.SEBottleneck, [3, 8, 36, 3], groups=64, reduction=16, first_stride=first_stride,
dropout_p=0.2, num_classes=num_classes)
if pretrained is not None:
settings = senet.pretrained_settings['senet154'][pretrained]
senet.initialize_pretrained_model(model, num_classes, settings)
return model
def se_resnet50(num_classes=1000, pretrained='imagenet', first_stride=1):
model = SENet(senet.SEBottleneck, [3, 4, 6, 3], groups=1, reduction=16, first_stride=first_stride,
dropout_p=None, inplanes=64, input_3x3=False,
downsample_kernel_size=1, downsample_padding=0,
num_classes=num_classes)
if pretrained is not None:
settings = senet.pretrained_settings['se_resnet50'][pretrained]
senet.initialize_pretrained_model(model, num_classes, settings)
return model
def se_resnet101(num_classes=1000, pretrained='imagenet', first_stride=1):
model = SENet(senet.SEBottleneck, [3, 4, 23, 3], groups=1, reduction=16, first_stride=first_stride,
dropout_p=None, inplanes=64, input_3x3=False,
downsample_kernel_size=1, downsample_padding=0,
num_classes=num_classes)
if pretrained is not None:
settings = senet.pretrained_settings['se_resnet101'][pretrained]
senet.initialize_pretrained_model(model, num_classes, settings)
return model
def se_resnet152(num_classes=1000, pretrained='imagenet', first_stride=1):
model = SENet(senet.SEBottleneck, [3, 8, 36, 3], groups=1, reduction=16, first_stride=first_stride,
dropout_p=None, inplanes=64, input_3x3=False,
downsample_kernel_size=1, downsample_padding=0,
num_classes=num_classes)
if pretrained is not None:
settings = senet.pretrained_settings['se_resnet152'][pretrained]
senet.initialize_pretrained_model(model, num_classes, settings)
return model
def se_resnext50_32x4d(num_classes=1000, pretrained='imagenet', first_stride=1):
model = SENet(senet.SEResNeXtBottleneck, [3, 4, 6, 3], groups=32, reduction=16, first_stride=first_stride,
dropout_p=None, inplanes=64, input_3x3=False,
downsample_kernel_size=1, downsample_padding=0,
num_classes=num_classes)
if pretrained is not None:
settings = senet.pretrained_settings['se_resnext50_32x4d'][pretrained]
senet.initialize_pretrained_model(model, num_classes, settings)
return model
def se_resnext101_32x4d(num_classes=1000, pretrained='imagenet', first_stride=1):
model = SENet(senet.SEResNeXtBottleneck, [3, 4, 23, 3], groups=32, reduction=16, first_stride=first_stride,
dropout_p=None, inplanes=64, input_3x3=False,
downsample_kernel_size=1, downsample_padding=0,
num_classes=num_classes)
if pretrained is not None:
settings = senet.pretrained_settings['se_resnext101_32x4d'][pretrained]
senet.initialize_pretrained_model(model, num_classes, settings)
return model
```
#### File: 4uiiurz1/kaggle-tgs-salt-identification-challenge/validate.py
```python
import time
import os
import math
import argparse
from glob import glob
from collections import OrderedDict
import random
import warnings
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import pandas as pd
import joblib
import cv2
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.metrics import log_loss
from skimage.io import imread, imsave
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torchvision
from torchvision import datasets, models, transforms
from dataset import Dataset
import archs
from metrics import mean_iou, dice_coef
from utils import depth_encode, coord_conv, pad, crop
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--name', default=None,
help='model name')
args = parser.parse_args()
return args
def main():
val_args = parse_args()
args = joblib.load('models/%s/args.pkl' %val_args.name)
if not os.path.exists('output/%s' %args.name):
os.makedirs('output/%s' %args.name)
print('Config -----')
for arg in vars(args):
print('%s: %s' %(arg, getattr(args, arg)))
print('------------')
joblib.dump(args, 'models/%s/args.pkl' %args.name)
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
# create model
print("=> creating model %s" %args.arch)
model = archs.__dict__[args.arch](args)
if args.gpu is not None:
model = model.cuda(args.gpu)
else:
model = torch.nn.DataParallel(model).cuda()
# Data loading code
train_df = pd.read_csv('input/train.csv')
img_paths = 'input/train/images/' + train_df['id'].values + '.png'
mask_paths = 'input/train/masks/' + train_df['id'].values + '.png'
if args.cv == 'KFold':
kf = KFold(n_splits=args.n_splits, shuffle=True, random_state=41)
cv = kf.split(img_paths)
elif args.cv == 'Cov':
train_df['cov'] = 0
for i in tqdm(range(len(train_df))):
mask = imread('input/train/masks/' + train_df['id'][i] + '.png')
mask = mask.astype('float32') / 255
train_df.loc[i, 'cov'] = ((np.sum(mask>0.5) / 101**2) * 10).astype('int')
skf = StratifiedKFold(n_splits=args.n_splits, shuffle=True, random_state=41)
cv = skf.split(img_paths, train_df['cov'])
if not os.path.exists('output/%s/val' %args.name):
os.makedirs('output/%s/val' %args.name)
for fold, (train_idx, val_idx) in enumerate(cv):
print('Fold [%d/%d]' %(fold+1, args.n_splits))
model.load_state_dict(torch.load('models/%s/model_%d.pth' %(args.name, fold+1)))
model.eval()
train_img_paths, val_img_paths = img_paths[train_idx], img_paths[val_idx]
train_mask_paths, val_mask_paths = mask_paths[train_idx], mask_paths[val_idx]
with warnings.catch_warnings():
warnings.simplefilter('ignore')
for i in tqdm(range(len(val_img_paths))):
image = imread(val_img_paths[i])
mask = imread(val_mask_paths[i])
image = image.astype('float32') / 255
mask = mask.astype('float32') / 65535
if 'Res' in args.arch:
means = [0.485, 0.456, 0.406]
stds = [0.229, 0.224, 0.225]
for c in range(3):
image[:,:,c] = (image[:,:,c] - means[c]) / stds[c]
if 'Inception' in args.arch:
means = [0.5, 0.5, 0.5]
stds = [0.5, 0.5, 0.5]
for c in range(3):
image[:,:,c] = (image[:,:,c] - means[c]) / stds[c]
pbs = []
if not args.pad:
input = cv2.resize(image, (args.img_size, args.img_size))
else:
input = pad(image, args.img_size)
if args.depth:
input = depth_encode(input)
if args.coord_conv:
input = coord_conv(input)
input = input.transpose((2, 0, 1))
input = input[np.newaxis, :, :, :]
input = torch.from_numpy(input)
input = input.cuda(args.gpu)
output = model(input)
output = F.sigmoid(output)
pb = output.data.cpu().numpy()
pb = pb[0, 0, :, :]
if not args.pad:
pb = cv2.resize(pb, (101, 101))
else:
pb = crop(pb, 101)
pbs.append(pb)
if not args.pad:
input = cv2.resize(image[:, ::-1, :], (args.img_size, args.img_size))
else:
input = pad(image[:, ::-1, :], args.img_size)
if args.depth:
input = depth_encode(input)
if args.coord_conv:
input = coord_conv(input)
input = input.transpose((2, 0, 1))
input = input[np.newaxis, :, :, :]
input = torch.from_numpy(input)
input = input.cuda(args.gpu)
output = model(input)
output = F.sigmoid(output)
pb = output.data.cpu().numpy()[:, :, :, ::-1]
pb = pb[0, 0, :, :]
if not args.pad:
pb = cv2.resize(pb, (101, 101))
else:
pb = crop(pb, 101)
pbs.append(pb)
pb = np.mean(pbs, axis=0)
imsave('output/%s/val/%s' %(args.name, os.path.basename(val_img_paths[i])),
(pb*255).astype('uint8'))
torch.cuda.empty_cache()
# Loss
losses = []
for i in tqdm(range(len(mask_paths))):
mask = imread(mask_paths[i])
pb = imread('output/%s/val/%s' %(args.name, os.path.basename(img_paths[i])))
mask = (mask > 65535/2).astype('int')
pb = pb.astype('float64') / 255
loss = log_loss(mask.flatten(), pb.flatten(), labels=[0, 1])
losses.append(loss)
# IoU
thrs = np.linspace(0.4, 0.6, 21)
ious = []
for thr in thrs:
print('thr=%0.2f: ' %thr, end='')
tmp_ious = []
for i in tqdm(range(len(mask_paths))):
mask = imread(mask_paths[i])
pb = imread('output/%s/val/%s' %(args.name, os.path.basename(img_paths[i])))
mask = (mask > 65535/2).astype('int')
pb = pb.astype('float64') / 255
iou = mean_iou(mask, pb>thr)
tmp_ious.append(iou)
ious.append(np.mean(tmp_ious))
print(np.mean(tmp_ious))
val_info = {
'loss': np.mean(losses),
'best_iou': np.max(ious),
'best_thr': thrs[np.argmax(ious)]
}
print('Result -----')
print('Loss: %f' %val_info['loss']),
print('Best IoU: %f' %val_info['best_iou']),
print('Best threshold: %f' %val_info['best_thr'])
print('------------')
with open('models/%s/val_info.txt' %args.name, 'w') as f:
print('Result -----', file=f)
print('Loss: %f' %val_info['loss'], file=f),
print('Best IoU: %f' %val_info['best_iou'], file=f),
print('Best threshold: %.2f' %val_info['best_thr'], file=f)
print('------------', file=f)
joblib.dump(val_info, 'models/%s/val_info.pkl' %args.name)
if __name__ == '__main__':
main()
``` |
{
"source": "4uiiurz1/pytorch-scale-aware-triplet",
"score": 3
} |
#### File: 4uiiurz1/pytorch-scale-aware-triplet/archs.py
```python
import numpy as np
from torch import nn
from torch.nn import functional as F
import torch
from torchvision import models
import torchvision
class BNNet(nn.Module):
def __init__(self):
super().__init__()
self.features = nn.Sequential(
nn.ZeroPad2d(2),
nn.Conv2d(1, 16, 3, padding=1),
nn.BatchNorm2d(16),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Conv2d(16, 32, 3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Conv2d(32, 64, 3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
)
self.classifier = nn.Sequential(
nn.Linear(4*4*64, 256),
nn.BatchNorm1d(256),
nn.ReLU(inplace=True),
nn.Linear(256, 2),
nn.BatchNorm1d(2, affine=False),
)
def forward(self, input):
x = self.features(input)
x = x.view(x.shape[0], -1)
output = self.classifier(x)
return output
``` |
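A shape-check sketch for the network above: 28x28 single-channel inputs are zero-padded to 32x32, three stride-2 poolings bring that down to 4x4, which is exactly what the 4*4*64 classifier input expects.
```python
import torch

net = BNNet().eval()
with torch.no_grad():
    embedding = net(torch.randn(4, 1, 28, 28))
print(embedding.shape)  # torch.Size([4, 2])
```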
{
"source": "4uiiurz1/tf-dark-pose",
"score": 2
} |
#### File: tf-dark-pose/src/optimizers.py
```python
import tensorflow as tf
def get_optimizer(config):
if config.train.optimizer == 'adam':
optimizer = tf.optimizers.Adam(lr=config.train.lr)
elif config.train.optimizer == 'sgd':
optimizer = tf.optimizers.SGD(lr=config.train.lr, momentum=0.9)
else:
raise NotImplementedError
return optimizer
```
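Usage sketch: in this repo the config is an EasyDict loaded from `data/config.yaml`; the inline dict below only mimics the two fields `get_optimizer` reads.
```python
from easydict import EasyDict as edict

config = edict({"train": {"optimizer": "sgd", "lr": 1e-3}})
optimizer = get_optimizer(config)
print(type(optimizer).__name__)  # SGD
```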
#### File: tf-dark-pose/tests/test_dataset.py
```python
import random
import pytest
from easydict import EasyDict as edict
import yaml
import tensorflow as tf
from src.datasets import get_dataset
def test_coco_shuffle():
random.seed(71)
with open('data/config.yaml') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
config = edict(config)
dataset = get_dataset(
config,
image_set=config.dataset.test_set,
is_train=True,
)
generator = tf.data.Dataset.from_generator(
dataset.generator,
dataset.output_types,
)
generator = generator.map(lambda x: x, num_parallel_calls=tf.data.experimental.AUTOTUNE)
generator = generator.batch(config.train.batch_size_per_gpu)
generator = generator.prefetch(tf.data.experimental.AUTOTUNE)
file_names_list = []
for epoch in range(2):
file_names = []
for data in generator:
file_names.extend(data['file_name'].numpy().tolist())
file_names_list.append(file_names)
assert len(file_names_list[0]) == len(file_names_list[1])
matched_cnt = 0
for file_name1, file_name2 in zip(file_names_list[0], file_names_list[1]):
if file_name1 == file_name2:
matched_cnt += 1
assert matched_cnt / len(file_names_list[0]) <= 0.005
```
#### File: tf-dark-pose/tests/test_loss.py
```python
import pytest
import numpy as np
from src.losses import JointsMSELoss
def test_joint_mse_loss():
"""Check if loss matches pytorch.
Expected value is created by the following code:
.. code-block:: python
import torch.nn as nn
class JointsMSELoss(nn.Module):
def __init__(self, use_target_weight):
super().__init__()
self.criterion = nn.MSELoss(reduction='mean')
self.use_target_weight = use_target_weight
def forward(self, output, target, target_weight):
batch_size = output.size(0)
num_joints = output.size(1)
heatmaps_pred = output.reshape(
(batch_size, num_joints, -1)).split(1, 1)
heatmaps_gt = target.reshape((batch_size, num_joints, -1)).split(1, 1)
loss = 0
for idx in range(num_joints):
heatmap_pred = heatmaps_pred[idx].squeeze()
heatmap_gt = heatmaps_gt[idx].squeeze()
if self.use_target_weight:
loss += 0.5 * self.criterion(
heatmap_pred.mul(target_weight[:, idx]),
heatmap_gt.mul(target_weight[:, idx])
)
else:
loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)
return loss / num_joints
if __name__ == '__main__':
import numpy as np
import torch
np.random.seed(71)
output = torch.from_numpy(np.random.rand(32, 32, 24, 17).astype(np.float32))
output = output.permute(0, 3, 1, 2)
target = torch.from_numpy(np.random.rand(
32, 32, 24, 17).astype(np.float32))
target = target.permute(0, 3, 1, 2)
target_weight = torch.from_numpy(
np.random.rand(32, 17, 1).astype(np.float32))
criterion = JointsMSELoss(use_target_weight=True)
loss = criterion(output, target, target_weight)
print(loss.numpy())
"""
np.random.seed(71)
output = np.random.rand(32, 32, 24, 17).astype(np.float32)
target = np.random.rand(32, 32, 24, 17).astype(np.float32)
target_weight = np.random.rand(32, 17, 1).astype(np.float32)
criterion = JointsMSELoss(use_target_weight=True)
loss = criterion(output, target, target_weight)
loss = loss.numpy()
assert loss == pytest.approx(0.030641317, abs=1e-9)
``` |
{
"source": "4uk/project",
"score": 2
} |
#### File: project/pandas_analysis/views.py
```python
from django.shortcuts import render
# Import the standard modules for paginating pages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# Import the module used to measure elapsed time
import time
# Import the pandas analysis module
import pandas as pd
# Import the configuration file to get the constants
from django.conf import settings
'''
Module for creating dumps. It is only used for pandas, since pandas works much slower than numpy VG
Example code for creating a dump
dt = pd.read_csv('base.txt', index_col=False, delimiter=';', names=['date', 'hours', 'minutes', 'seconds', 'gap', 'grp', 'v', 'gi', 's1', 's2', 's3'])
dt.to_pickle('pandas.pickle', compression='infer')
'''
import pickle
# View that renders the home page
def index(request):
    return render(request, 'home.html')
# View that renders the form page
def pandas_page(request):
    return render(request, 'pandas_page.html')
# View that processes the pandas variants
def pandas_processing(request):
    # Declare globals because we also handle non-POST (pagination) requests
    global end
    global pandas_data
    # Check the type of the form request
    if request.method == "POST":
        # Get the selected variant from the form
        c = request.POST.get('choice', None)
        # Handle variant 1
        if c == 'c1':
            # Declare the variable used for pagination (defaults to the first page)
            page = request.GET.get('page', 1)
            # Record the start time
            start = time.time()
            # Read the data from the dump
            pandas_data = pd.read_pickle(settings.PANDAS_DUMP, compression='infer')
            # Select the rows whose global_active_power is greater than 5
            pandas_data = pandas_data[pandas_data['gap'] > 5]
            # Convert the resulting frame to a list and pass it to the paginator, limiting each page to 1000 rows
            paginator = Paginator(pandas_data.values.tolist(), 1000)
            # Record the execution time
            end = time.time() - start
            try:
                # Get the values for the requested page
                p = paginator.page(page)
            # Handle non-integer page numbers
            except PageNotAnInteger:
                # In that case show page 1
                p = paginator.page(1)
            # Handle empty pages
            except EmptyPage:
                # Show only the pagination
                p = paginator.page(paginator.num_pages)
            # Build the context dictionary
            context = {'frame': p, 'time': end}
            # Pass the processed data to the page
            return render(request, 'pandas_data.html', context)
        # Handle variant 2
        elif c == 'c2':
            # Declare the variable used for pagination (defaults to the first page)
            page = request.GET.get('page', 1)
            # Record the start time
            start = time.time()
            # Read the data from the dump
            pandas_data = pd.read_pickle(settings.PANDAS_DUMP, compression='infer')
            # Select the rows whose voltage is greater than 235
            pandas_data = pandas_data[pandas_data['v'] > 235]
            # A separate dump was created for the voltage query, since searching all the values would require more server resources
            with open(settings.VOLTAGE_DUMP, 'rb') as handle:
                # Assign the loaded value to the paginator
                paginator = pickle.load(handle)
            # Record the elapsed time
            end = time.time() - start
            try:
                # Get the values for the requested page
                p = paginator.page(page)
            # Handle non-integer page numbers
            except PageNotAnInteger:
                # In that case show page 1
                p = paginator.page(1)
            # Handle empty pages
            except EmptyPage:
                # Show only the pagination
                p = paginator.page(paginator.num_pages)
            # Build the context dictionary
            context = {'frame': p, 'time': end}
            # Pass the processed data to the page
            return render(request, 'pandas_data.html', context)
        # Handle variant 3
        elif c == 'c3':
            # Declare the variable used for pagination (defaults to the first page)
            page = request.GET.get('page', 1)
            # Record the start time
            start = time.time()
            # Read the data from the dump
            pandas_data = pd.read_pickle(settings.PANDAS_DUMP, compression='infer')
            # Select the rows whose global_intensity is between 19 and 20
            pandas_data = pandas_data[(pandas_data['gi'] >= 19) & (pandas_data['gi'] <= 20)]
            # Convert the resulting frame to a list and pass it to the paginator, limiting each page to 1000 rows
            paginator = Paginator(pandas_data.values.tolist(), 1000)
            # Record the elapsed time
            end = time.time() - start
            try:
                # Get the values for the requested page
                p = paginator.page(page)
            # Handle non-integer page numbers
            except PageNotAnInteger:
                # In that case show page 1
                p = paginator.page(1)
            # Handle empty pages
            except EmptyPage:
                # Show only the pagination
                p = paginator.page(paginator.num_pages)
            # Build the context dictionary
            context = {'frame': p, 'time': end}
            # Pass the processed data to the page
            return render(request, 'pandas_data.html', context)
        # Handle variant 4
        elif c == 'c4':
            # Declare the variable used for pagination (defaults to the first page)
            page = request.GET.get('page', 1)
            # Record the start time
            start = time.time()
            # Read the data from the dump
            pandas_data = pd.read_pickle(settings.PANDAS_DUMP, compression='infer')
            # Draw a sample of 500000 rows (with replacement)
            pandas_data = pandas_data.sample(n=500000, replace=True)
            # Compute the average value for group 1
            s1_average = sum(pandas_data['s1'])/500000
            # Compute the average value for group 2
            s2_average = sum(pandas_data['s2'])/500000
            # Compute the average value for group 3
            s3_average = sum(pandas_data['s3'])/500000
            # Select the records after 18:00
            pandas_data = pandas_data[(pandas_data['hours'] >= 18) & (pandas_data['minutes'] > 0)]
            # From the previous frame keep the rows where global_active_power is greater than 6
            pandas_data = pandas_data[pandas_data['gap'] > 6]
            # From that result keep all records where group 2 is greater than group 1 and group 3
            pandas_data = pandas_data[(pandas_data['s2'] > pandas_data['s1']) & (pandas_data['s2'] > pandas_data['s3'])]
            # Take the size of the resulting frame and split it in half
            l = len(pandas_data) // 2
            # Slice the first half
            first_part = pandas_data[:l]
            # From the first half take every 3rd value
            first_part = first_part[::3]
            # Slice the second half
            second_part = pandas_data[l:]
            # From the second half take every 4th value
            second_part = second_part[::4]
            # Build a list from the two parts
            f = [first_part, second_part]
            # Concatenate the two parts into one frame
            pandas_data = pd.concat(f)
            # Convert the resulting frame to a list and pass it to the paginator, limiting each page to 1000 rows
            paginator = Paginator(pandas_data.values.tolist(), 1000)
            # Record the elapsed time
            end = time.time() - start
            try:
                # Get the values for the requested page
                p = paginator.page(page)
            # Handle non-integer page numbers
            except PageNotAnInteger:
                # In that case show page 1
                p = paginator.page(1)
            # Handle empty pages
            except EmptyPage:
                # Show only the pagination
                p = paginator.page(paginator.num_pages)
            # Build the context dictionary
            context = {'frame': p, 'time': end, 'av1': s1_average, 'av2': s2_average, 'av3': s3_average}
            # Pass the processed data to the page
            return render(request, 'pandas_data.html', context)
    # This branch is needed for navigating between result pages
else:
        # Declare the variable used for pagination (defaults to the first page)
page = request.GET.get('page', 1)
        # Take the frame from the global variable and convert it to a list
paginator = Paginator(pandas_data.values.tolist(), 1000)
try:
            # Get the values for the requested page
p = paginator.page(page)
        # Handle non-integer page numbers
except PageNotAnInteger:
            # In that case show page 1
p = paginator.page(1)
        # Handle empty pages
except EmptyPage:
            # Show only the pagination
p = paginator.page(paginator.num_pages)
        # Build the context dictionary
context = {'frame': p, 'time': end}
        # Pass the processed data to the page
return render(request, 'pandas_data.html', context)
```
#### File: 4uk/project/script.py
```python
import urllib.request
import pandas as pd
from spyre import server
import datetime
import os
import fileinput
import pickle
import time
import subprocess
class SpyreServer(server.App):
title = "Днепропетровская область"
inputs = [{
"type": "dropdown",
"label": "Начальное значение годового диапазона",
"options": [
{"label": "Не выбрано", "value": ""},
{"label": "1981", "value": 1981},
{"label": "1982", "value": 1982},
{"label": "1983", "value": 1983},
{"label": "1984", "value": 1984},
{"label": "1985", "value": 1985},
{"label": "1986", "value": 1986},
{"label": "1987", "value": 1987},
{"label": "1988", "value": 1988},
{"label": "1989", "value": 1989},
{"label": "1990", "value": 1990},
{"label": "1991", "value": 1991},
{"label": "1992", "value": 1992},
{"label": "1993", "value": 1993},
{"label": "1994", "value": 1994},
{"label": "1995", "value": 1995},
{"label": "1996", "value": 1996},
{"label": "1997", "value": 1997},
{"label": "1998", "value": 1998},
{"label": "1999", "value": 1999},
{"label": "2000", "value": 2000},
{"label": "2001", "value": 2001},
{"label": "2002", "value": 2002},
{"label": "2003", "value": 2003},
{"label": "2004", "value": 2004},
{"label": "2005", "value": 2005},
{"label": "2006", "value": 2006},
{"label": "2007", "value": 2007},
{"label": "2008", "value": 2008},
{"label": "2009", "value": 2009},
{"label": "2010", "value": 2010},
{"label": "2011", "value": 2011},
{"label": "2012", "value": 2012},
{"label": "2013", "value": 2013},
{"label": "2014", "value": 2014},
{"label": "2015", "value": 2015},
{"label": "2016", "value": 2016},
{"label": "2017", "value": 2017}
],
"key": "year_begin",
"action_id": "update_data"
},
{
"type": "dropdown",
"label": "Конечное значение годового диапазона",
"options": [
{"label": "Не выбрано", "value": ""},
{"label": "1981", "value": 1981},
{"label": "1982", "value": 1982},
{"label": "1983", "value": 1983},
{"label": "1984", "value": 1984},
{"label": "1985", "value": 1985},
{"label": "1986", "value": 1986},
{"label": "1987", "value": 1987},
{"label": "1988", "value": 1988},
{"label": "1989", "value": 1989},
{"label": "1990", "value": 1990},
{"label": "1991", "value": 1991},
{"label": "1992", "value": 1992},
{"label": "1993", "value": 1993},
{"label": "1994", "value": 1994},
{"label": "1995", "value": 1995},
{"label": "1996", "value": 1996},
{"label": "1997", "value": 1997},
{"label": "1998", "value": 1998},
{"label": "1999", "value": 1999},
{"label": "2000", "value": 2000},
{"label": "2001", "value": 2001},
{"label": "2002", "value": 2002},
{"label": "2003", "value": 2003},
{"label": "2004", "value": 2004},
{"label": "2005", "value": 2005},
{"label": "2006", "value": 2006},
{"label": "2007", "value": 2007},
{"label": "2008", "value": 2008},
{"label": "2009", "value": 2009},
{"label": "2010", "value": 2010},
{"label": "2011", "value": 2011},
{"label": "2012", "value": 2012},
{"label": "2013", "value": 2013},
{"label": "2014", "value": 2014},
{"label": "2015", "value": 2015},
{"label": "2016", "value": 2016},
{"label": "2017", "value": 2017}
],
"key": "year_end",
"action_id": "update_data"
},
{
"type": "dropdown",
"label": "<NAME>",
"options": [
{"label": "Не выбрано", "value": ''},
{"label": "1", "value": 1},
{"label": "2", "value": 2},
{"label": "3", "value": 3},
{"label": "4", "value": 4},
{"label": "5", "value": 5},
{"label": "6", "value": 6},
{"label": "7", "value": 7},
{"label": "8", "value": 8},
{"label": "9", "value": 9},
{"label": "10", "value": 10},
{"label": "11", "value": 11},
{"label": "12", "value": 12},
{"label": "13", "value": 13},
{"label": "14", "value": 14},
{"label": "15", "value": 15},
{"label": "16", "value": 16},
{"label": "17", "value": 17},
{"label": "18", "value": 18},
{"label": "19", "value": 19},
{"label": "20", "value": 20},
{"label": "21", "value": 21},
{"label": "22", "value": 22},
{"label": "23", "value": 23},
{"label": "24", "value": 24},
{"label": "25", "value": 25},
{"label": "26", "value": 26},
{"label": "27", "value": 27},
{"label": "28", "value": 28},
{"label": "29", "value": 29},
{"label": "30", "value": 30},
{"label": "31", "value": 31},
{"label": "32", "value": 32},
{"label": "33", "value": 33},
{"label": "34", "value": 34},
{"label": "35", "value": 35},
{"label": "36", "value": 36},
{"label": "37", "value": 37},
{"label": "38", "value": 38},
{"label": "39", "value": 39},
{"label": "40", "value": 40},
{"label": "41", "value": 41},
{"label": "42", "value": 42},
{"label": "43", "value": 43},
{"label": "44", "value": 44},
{"label": "45", "value": 45},
{"label": "46", "value": 46},
{"label": "47", "value": 47},
{"label": "48", "value": 48},
{"label": "49", "value": 49},
{"label": "50", "value": 50},
{"label": "51", "value": 51},
{"label": "52", "value": 52}
],
"key": "weeks",
"action_id": "update_data"
},
{
"type": "checkboxgroup",
"label": "Поиск экстремумов индекса VHI",
"options": [
{"label": "MAX - значение", "value": "max"},
{"label": "MIN - значение", "value": "min"}],
"key": "vhi_extremum",
"action_id": "update_data",
},
{
"type": "dropdown",
"label": "Соотношение вегетационного индекса",
"options": [
{"label": "Не выбрано", "value": ""},
{"label": "Благоприятные условия", "value": 60},
{"label": "Стрессовые условия", "value": 40},
{"label": "Интенсивность засухи от средней до чрезвычайной", "value": 15},
{"label": "Интенсивность засухи от умеренной до чрезвычайной", "value": 35}
],
"key": "vhi_correlation",
"action_id": "update_data"
},
{
"type": "dropdown",
"label": "Экстремальные засухи, что коснулись больше указанного процента области",
"options": [
{"label": "Не выбрано", "value": ""},
{"label": "0%", "value": "0"},
{"label": "5%", "value": "5"},
{"label": "10%", "value": "10"},
{"label": "15%", "value": "15"},
{"label": "20%", "value": "20"},
{"label": "25%", "value": "25"},
{"label": "30%", "value": "30"},
{"label": "35%", "value": "35"},
{"label": "40%", "value": "40"},
{"label": "45%", "value": "45"},
{"label": "50%", "value": "50"},
{"label": "55%", "value": "55"},
{"label": "60%", "value": "60"},
{"label": "65%", "value": "65"},
{"label": "70%", "value": "70"},
{"label": "75%", "value": "75"},
{"label": "80%", "value": "80"},
{"label": "85%", "value": "85"},
{"label": "90%", "value": "90"},
{"label": "95%", "value": "95"},
{"label": "100%", "value": "100"}
],
"key": "vhi_rate",
"action_id": "update_data"
},
{
"type": "dropdown",
"label": "Умеренные засухи, что коснулись больше указанного процента области",
"options": [
{"label": "Не выбрано", "value": ""},
{"label": "0%", "value": "0"},
{"label": "5%", "value": "5"},
{"label": "10%", "value": "10"},
{"label": "15%", "value": "15"},
{"label": "20%", "value": "20"},
{"label": "25%", "value": "25"},
{"label": "30%", "value": "30"},
{"label": "35%", "value": "35"},
{"label": "40%", "value": "40"},
{"label": "45%", "value": "45"},
{"label": "50%", "value": "50"},
{"label": "55%", "value": "55"},
{"label": "60%", "value": "60"},
{"label": "65%", "value": "65"},
{"label": "70%", "value": "70"},
{"label": "75%", "value": "75"},
{"label": "80%", "value": "80"},
{"label": "85%", "value": "85"},
{"label": "90%", "value": "90"},
{"label": "95%", "value": "95"},
{"label": "100%", "value": "100"}
],
"key": "vhi_mild",
"action_id": "update_data"
},
]
controls = [{
"type": "hidden",
"id": "update_data",
},
{
"type": "hidden",
"id": "update_data",
},
{
"type": "hidden",
"id": "update_data",
},
{
"type": "hidden",
"id": "update_data",
},
{
"type": "hidden",
"id": "update_data",
},
{
"type": "hidden",
"id": "update_data",
},
{
"type": "hidden",
"id": "update_data",
}
]
tabs = ["Table", "VHI-индексы", "TCI-индексы", "VCI-индексы"]
outputs = [
{
"type": "table",
"id": "table_id",
"control_id": 'update_data',
"tab": "Table",
"on_page_load": True
},
{
"type": "plot",
"id": "vhi_plot",
"control_id": "update_data",
"tab": "VHI-индексы",
"on_page_load": True
},
{
"type": "plot",
"id": "tci_plot",
"control_id": "update_data",
"tab": "TCI-индексы",
"on_page_load": True
},
{
"type": "plot",
"id": "vci_plot",
"control_id": "update_data",
"tab": "VCI-индексы",
"on_page_load": True
}
]
def vhi_data_parsing(self, fname):
url = urllib.request.urlopen('https://www.star.nesdis.noaa.gov/smcd/emb/vci/VH/get_provinceData.php?country=UKR&provinceID=5&year1=1981&year2=2017&type=Mean')
parsed_data = url.read()
with open(fname, 'wb') as data:
data.write(parsed_data)
while True:
try:
with fileinput.FileInput(fname, inplace=True) as file:
for line in file:
print(line.replace(', provinceID,', '').replace(' ', ',').replace(' ', ',').replace(',,', ',').replace(',', ', ').replace('</pre></tt>', ''), end='')
return False
except ValueError:
time.sleep(0.7)
with fileinput.FileInput(fname, inplace=True) as file:
for line in file:
print(line.replace(', provinceID,', '').replace(' ', ',').replace(' ', ',').replace(',,', ',').replace(',', ', ').replace('</pre></tt>', ''), end='')
return False
def vh_data_parsing(self, fname):
url = urllib.request.urlopen('https://www.star.nesdis.noaa.gov/smcd/emb/vci/VH/get_provinceData.php?country=UKR&provinceID=5&year1=1981&year2=2017&type=VHI_Parea')
parsed_data = url.read()
with open(fname, 'wb') as data:
data.write(parsed_data)
while True:
try:
with fileinput.FileInput(fname, inplace=True) as file:
for line in file:
print(line.replace(', provinceID,', '').replace(' ', ',').replace(' ', ',').replace(',,', ',').replace(',', ', ').replace('</pre></tt>', ''), end='')
return False
except ValueError:
time.sleep(0.7)
with fileinput.FileInput(fname, inplace=True) as file:
for line in file:
print(line.replace(', provinceID,', '').replace(' ', ',').replace(' ', ',').replace(',,', ',').replace(',', ', ').replace('</pre></tt>', ''), end='')
return False
def vhi_filename_creation(self):
global serial_data
serial_data = {}
serial_filepath = os.path.abspath('filenames.pickle')
curr_date = str(datetime.datetime.now())[0:10]
full_date = str(datetime.datetime.now()).replace(' ', '-')[:-7]
while True:
try:
with open(serial_filepath, 'rb') as d:
serial_data = pickle.load(d)
break
except FileNotFoundError:
open(serial_filepath, 'wb').close()
continue
except EOFError:
serial_data = {}
break
date = serial_data.get(curr_date, None)
if not date:
serial_data[curr_date] = {}
vhi = serial_data.get(curr_date).get('vhi', None)
if not vhi:
serial_data[curr_date]['vhi'] = ''
fname = serial_data.get(curr_date).get('vhi', None)
if not fname:
serial_data[curr_date]['vhi'] = 'vhi_dnipropetrovska_3_%s.csv' % full_date
with open(serial_filepath, 'wb') as file:
pickle.dump(serial_data, file)
return serial_data.get(curr_date).get('vhi', None)
return fname
def vh_filename_creation(self):
global serial_data
serial_data = {}
serial_filepath = os.path.abspath('filenames.pickle')
curr_date = str(datetime.datetime.now())[0:10]
full_date = str(datetime.datetime.now()).replace(' ', '-')[:-7]
while True:
try:
with open(serial_filepath, 'rb') as d:
serial_data = pickle.load(d)
break
except FileNotFoundError:
open(serial_filepath, 'wb').close()
continue
except EOFError:
serial_data = {}
break
date = serial_data.get(curr_date, None)
if not date:
serial_data[curr_date] = {}
vh = serial_data.get(curr_date).get('vh', None)
if not vh:
serial_data[curr_date]['vh'] = ''
fname = serial_data.get(curr_date).get('vh', None)
if not fname:
serial_data[curr_date]['vh'] = 'vh_dnipropetrovska_3_%s.csv' % full_date
with open(serial_filepath, 'wb') as file:
pickle.dump(serial_data, file)
return serial_data.get(curr_date).get('vh', None)
return fname
def vhi_frame_creator(self):
fname = self.vhi_filename_creation()
curr_date = str(datetime.datetime.now())[0:10]
fname_path = os.path.join(os.path.abspath(os.getcwd()), curr_date, fname)
upload_dir = os.path.join(os.path.abspath(curr_date))
if not os.path.isdir(os.path.join(os.path.abspath(os.getcwd()), curr_date)):
try:
subprocess.call(['mkdir %s' % upload_dir], stdout=subprocess.PIPE, shell=True)
except:
pass
if not os.path.isfile(os.path.join(os.path.abspath(os.getcwd()), curr_date, fname)):
self.vhi_data_parsing(fname_path)
data_frame = pd.read_csv(os.path.join(os.path.abspath(os.getcwd()), curr_date, fname), index_col=False, header=1, names=['year', 'week', 'smn', 'smt', 'vci', 'tci', 'vhi'], delimiter=',', error_bad_lines=False)
return data_frame
def vh_frame_creator(self):
fname = self.vh_filename_creation()
curr_date = str(datetime.datetime.now())[0:10]
fname_path = os.path.join(os.path.abspath(os.getcwd()), curr_date, fname)
upload_dir = os.path.join(os.path.abspath(curr_date))
if not os.path.isdir(os.path.join(os.path.abspath(os.getcwd()), curr_date)):
try:
subprocess.call(['mkdir %s' % upload_dir], stdout=subprocess.PIPE, shell=True)
except:
pass
if not os.path.isfile(os.path.join(os.path.abspath(os.getcwd()), curr_date, fname)):
self.vh_data_parsing(fname_path)
data_frame = pd.read_csv(os.path.join(os.path.abspath(os.getcwd()), curr_date, fname), index_col=False, header=1, names=['year', 'week', '0%', '5%', '10%', '15%', '20%', '25%', '30%', '35%', '40%', '45%', '50%', '55%', '60%', '65%', '70%', '75%', '80%', '85%', '90%', '95%', '100%'], delimiter=',', error_bad_lines=False)
return data_frame
def plot_maker_vhi(self):
fname = self.vhi_filename_creation()
upload_dir = str(datetime.datetime.now())[0:10]
if not os.path.isdir(os.path.join(os.path.abspath(os.getcwd()), upload_dir)):
try:
subprocess.call(['mkdir %s' % upload_dir], stdout=subprocess.PIPE, shell=True)
except:
pass
fname_path = os.path.join(os.path.abspath(os.getcwd()), upload_dir, fname)
while True:
r = os.path.isfile(fname_path)
if r:
break
time.sleep(0.7)
data_frame = pd.read_csv(fname_path, index_col=False, header=1, names=['year', 'week', 'smn', 'smt', 'vci', 'tci', 'vhi'], delimiter=',', error_bad_lines=False)
return data_frame
def plot_maker_vh(self):
fname = self.vh_filename_creation()
upload_dir = str(datetime.datetime.now())[0:10]
if not os.path.isdir(os.path.join(os.path.abspath(os.getcwd()), upload_dir)):
try:
subprocess.call(['mkdir %s' % upload_dir], stdout=subprocess.PIPE, shell=True)
except:
pass
fname_path = os.path.join(os.path.abspath(os.getcwd()), upload_dir, fname)
while True:
r = os.path.isfile(fname_path)
if r:
break
time.sleep(0.7)
data_frame = pd.read_csv(fname_path, index_col=False, header=1, names=['year', 'week', '0%', '5%', '10%', '15%', '20%', '25%', '30%', '35%', '40%', '45%', '50%', '55%', '60%', '65%', '70%', '75%', '80%', '85%', '90%', '95%', '100%'], delimiter=',', error_bad_lines=False)
return data_frame
def getData(self, params):
year_begin = params['year_begin']
year_end = params['year_end']
weeks = params['weeks']
vhi_extremum = params['vhi_extremum']
vhi_correlation = params['vhi_correlation']
vhi_rate = params['vhi_rate']
vhi_mild = params['vhi_mild']
if year_begin and year_end:
if year_end < year_begin:
current = year_begin
year_begin = year_end
year_end = current
if not vhi_rate and not vhi_mild:
df = self.vhi_frame_creator()
if year_begin:
if weeks and not year_end:
return df[(df['year'] == int(year_begin)) & (df['week'] == int(weeks))]
elif vhi_extremum and not year_end:
if vhi_extremum[0] == 'max':
return df[(df['year'] == int(year_begin)) & (df['vhi'] == df.groupby('year')['vhi'].transform('max'))]
else:
return df[(df['year'] == int(year_begin)) & (df['vhi'] == df.groupby('year')['vhi'].transform('min'))]
elif year_end:
if vhi_extremum:
if vhi_extremum[0] == 'max':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] == df.groupby('year')['vhi'].transform('max'))]
else:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] == df.groupby('year')['vhi'].transform('min'))]
elif weeks:
if vhi_correlation and year_end and weeks:
if int(vhi_correlation) == 60:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks)) & (df['vhi'] > 60)]
elif int(vhi_correlation) == 40:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks)) & (df['vhi'] < 40)]
elif int(vhi_correlation) == 15:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks)) & (df['vhi'] < 15)]
else:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks)) & (df['vhi'] < 35)]
else:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks))]
elif vhi_correlation and year_end:
if int(vhi_correlation) == 60:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] > 60)]
elif int(vhi_correlation) == 40:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] < 40)]
elif int(vhi_correlation) == 15:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] < 15)]
else:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] < 35)]
else:
return df[df['year'].between(int(year_begin), int(year_end))]
elif vhi_correlation and not year_end:
if int(vhi_correlation) == 60:
return df[(df['year'] == int(year_begin)) & (df['vhi'] > 60)]
elif int(vhi_correlation) == 40:
return df[(df['year'] == int(year_begin)) & (df['vhi'] < 40)]
elif int(vhi_correlation) == 15:
return df[(df['year'] == int(year_begin)) & (df['vhi'] < 15)]
else:
return df[(df['year'] == int(year_begin)) & (df['vhi'] < 35)]
else:
return df[(df['year'] == int(year_begin))]
elif vhi_extremum:
if vhi_extremum[0] == 'max':
return df[df['vhi'] == df.groupby('year')['vhi'].transform('max')]
else:
return df[df['vhi'] == df.groupby('year')['vhi'].transform('min')]
elif vhi_correlation:
if int(vhi_correlation) == 60:
return df[(df['vhi'] > 60)]
elif int(vhi_correlation) == 40:
return df[(df['vhi'] < 40)]
elif int(vhi_correlation) == 15:
return df[(df['vhi'] < 15)]
else:
return df[(df['vhi'] < 35)]
            elif year_end:
                # year_begin is not set on this path, so filter on year_end alone.
                return df[df['year'] == int(year_end)]
else:
return df
        elif vhi_rate:
            df = self.vh_frame_creator()
            # Percent-area columns in ascending order; the share above a given band is
            # the sum of every column strictly greater than it.
            pct_cols = ['0%', '5%', '10%', '15%', '20%', '25%', '30%', '35%', '40%', '45%', '50%',
                        '55%', '60%', '65%', '70%', '75%', '80%', '85%', '90%', '95%', '100%']
            rate_col = vhi_rate + '%'
            if rate_col not in pct_cols:
                return df
            # For '100' there is no higher band, so fall back to the '100%' column itself.
            tail_cols = pct_cols[pct_cols.index(rate_col) + 1:] or ['100%']
            rate_mask = df[tail_cols].sum(axis=1) < 15
            if year_begin and year_end:
                return df[(df['year'].between(int(year_begin), int(year_end))) & rate_mask]
            elif year_begin:
                return df[(df['year'] == int(year_begin)) & rate_mask]
            else:
                return df[rate_mask]
        elif vhi_mild and not vhi_rate:
            df = self.vh_frame_creator()
            # Same band logic as the vhi_rate branch, with the milder threshold of 35.
            pct_cols = ['0%', '5%', '10%', '15%', '20%', '25%', '30%', '35%', '40%', '45%', '50%',
                        '55%', '60%', '65%', '70%', '75%', '80%', '85%', '90%', '95%', '100%']
            mild_col = vhi_mild + '%'
            if mild_col not in pct_cols:
                return df
            tail_cols = pct_cols[pct_cols.index(mild_col) + 1:] or ['100%']
            mild_mask = df[tail_cols].sum(axis=1) < 35
            if year_begin and year_end:
                return df[(df['year'].between(int(year_begin), int(year_end))) & mild_mask]
            elif year_begin:
                return df[(df['year'] == int(year_begin)) & mild_mask]
            else:
                return df[mild_mask]
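    # vhi_plot mirrors getData, but returns stacked area plots (matplotlib Axes)
    # instead of raw DataFrames.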
def vhi_plot(self, params):
year_begin = params['year_begin']
year_end = params['year_end']
weeks = params['weeks']
vhi_extremum = params['vhi_extremum']
vhi_correlation = params['vhi_correlation']
vhi_rate = params['vhi_rate']
vhi_mild = params['vhi_mild']
        if year_begin and year_end and year_end < year_begin:
            # Accept the year bounds in either order.
            year_begin, year_end = year_end, year_begin
if not vhi_rate and not vhi_mild:
df = self.plot_maker_vhi()
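            # Same filtering as getData; vci/tci are dropped and the VHI series is
            # drawn as a stacked area chart.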
if year_begin:
if weeks and not year_end:
return df[(df['year'] == int(year_begin)) & (df['week'] == int(weeks))].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif vhi_extremum and not year_end:
if vhi_extremum[0] == 'max':
return df[(df['year'] == int(year_begin)) & (df['vhi'] == df.groupby('year')['vhi'].transform('max'))].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'] == int(year_begin)) & (df['vhi'] == df.groupby('year')['vhi'].transform('min'))].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif year_end:
if vhi_extremum:
if vhi_extremum[0] == 'max':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] == df.groupby('year')['vhi'].transform('max'))].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] == df.groupby('year')['vhi'].transform('min'))].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif weeks:
if vhi_correlation and year_end and weeks:
if int(vhi_correlation) == 60:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks)) & (df['vhi'] > 60)].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 40:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks)) & (df['vhi'] < 40)].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 15:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks)) & (df['vhi'] < 15)].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks)) & (df['vhi'] < 35)].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks))].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif vhi_correlation and year_end:
if int(vhi_correlation) == 60:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] > 60)].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 40:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] < 40)].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 15:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] < 15)].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] < 35)].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[df['year'].between(int(year_begin), int(year_end))].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif vhi_correlation and not year_end:
if int(vhi_correlation) == 60:
return df[(df['year'] == int(year_begin)) & (df['vhi'] > 60)].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 40:
return df[(df['year'] == int(year_begin)) & (df['vhi'] < 40)].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 15:
return df[(df['year'] == int(year_begin)) & (df['vhi'] < 15)].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'] == int(year_begin)) & (df['vhi'] < 35)].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'] == int(year_begin))].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif vhi_extremum:
if vhi_extremum[0] == 'max':
return df[df['vhi'] == df.groupby('year')['vhi'].transform('max')].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[df['vhi'] == df.groupby('year')['vhi'].transform('min')].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif vhi_correlation:
if int(vhi_correlation) == 60:
return df[(df['vhi'] > 60)].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 40:
return df[(df['vhi'] < 40)].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 15:
return df[(df['vhi'] < 15)].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['vhi'] < 35)].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
            elif year_end:
                # year_begin is not set on this path, so filter on year_end alone.
                return df[df['year'] == int(year_end)].drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df.drop(['vci', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
        elif vhi_rate:
            df = self.plot_maker_vh()
            # Same percent-band selection as getData, plotted as an area chart.
            pct_cols = ['0%', '5%', '10%', '15%', '20%', '25%', '30%', '35%', '40%', '45%', '50%',
                        '55%', '60%', '65%', '70%', '75%', '80%', '85%', '90%', '95%', '100%']
            rate_col = vhi_rate + '%'
            if rate_col not in pct_cols:
                return df.plot.area(x='year', figsize=(11, 6))
            # For '100' there is no higher band, so fall back to the '100%' column itself.
            tail_cols = pct_cols[pct_cols.index(rate_col) + 1:] or ['100%']
            rate_mask = df[tail_cols].sum(axis=1) < 15
            if year_begin and year_end:
                return df[(df['year'].between(int(year_begin), int(year_end))) & rate_mask].plot.area(x='year', figsize=(11, 6))
            elif year_begin:
                return df[(df['year'] == int(year_begin)) & rate_mask].plot.area(x='year', figsize=(11, 6))
            else:
                return df[rate_mask].plot.area(x='year', figsize=(11, 6))
        elif vhi_mild and not vhi_rate:
            df = self.plot_maker_vh()
            # Mild-threshold (35) variant of the percent-band plot above.
            pct_cols = ['0%', '5%', '10%', '15%', '20%', '25%', '30%', '35%', '40%', '45%', '50%',
                        '55%', '60%', '65%', '70%', '75%', '80%', '85%', '90%', '95%', '100%']
            mild_col = vhi_mild + '%'
            if mild_col not in pct_cols:
                return df.plot.area(x='year', figsize=(11, 6))
            tail_cols = pct_cols[pct_cols.index(mild_col) + 1:] or ['100%']
            mild_mask = df[tail_cols].sum(axis=1) < 35
            if year_begin and year_end:
                return df[(df['year'].between(int(year_begin), int(year_end))) & mild_mask].plot.area(x='year', figsize=(11, 6))
            elif year_begin:
                return df[(df['year'] == int(year_begin)) & mild_mask].plot.area(x='year', figsize=(11, 6))
            else:
                return df[mild_mask].plot.area(x='year', figsize=(11, 6))
def tci_plot(self, params):
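        """Build an area plot for the TCI view.

        The weekly view drops the vhi and vci columns before plotting; the
        vhi_rate / vhi_mild views plot the percentile columns instead.
        """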
year_begin = params['year_begin']
year_end = params['year_end']
weeks = params['weeks']
vhi_extremum = params['vhi_extremum']
vhi_correlation = params['vhi_correlation']
vhi_rate = params['vhi_rate']
vhi_mild = params['vhi_mild']
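        # Normalise the requested range so that year_begin <= year_end.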
if year_begin and year_end:
            if year_end < year_begin:
                year_begin, year_end = year_end, year_begin
if not vhi_rate and not vhi_mild:
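            # Weekly index view: vhi and vci are dropped before plotting so the tci column is shown.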
df = self.plot_maker_vhi()
if year_begin:
if weeks and not year_end:
return df[(df['year'] == int(year_begin)) & (df['week'] == int(weeks))].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif vhi_extremum and not year_end:
if vhi_extremum[0] == 'max':
return df[(df['year'] == int(year_begin)) & (df['vhi'] == df.groupby('year')['vhi'].transform('max'))].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'] == int(year_begin)) & (df['vhi'] == df.groupby('year')['vhi'].transform('min'))].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif year_end:
if vhi_extremum:
if vhi_extremum[0] == 'max':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] == df.groupby('year')['vhi'].transform('max'))].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] == df.groupby('year')['vhi'].transform('min'))].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif weeks:
if vhi_correlation and year_end and weeks:
if int(vhi_correlation) == 60:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks)) & (df['vhi'] > 60)].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 40:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks)) & (df['vhi'] < 40)].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 15:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks)) & (df['vhi'] < 15)].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks)) & (df['vhi'] < 35)].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks))].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif vhi_correlation and year_end:
if int(vhi_correlation) == 60:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] > 60)].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 40:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] < 40)].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 15:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] < 15)].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] < 35)].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[df['year'].between(int(year_begin), int(year_end))].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif vhi_correlation and not year_end:
if int(vhi_correlation) == 60:
return df[(df['year'] == int(year_begin)) & (df['vhi'] > 60)].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 40:
return df[(df['year'] == int(year_begin)) & (df['vhi'] < 40)].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 15:
return df[(df['year'] == int(year_begin)) & (df['vhi'] < 15)].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'] == int(year_begin)) & (df['vhi'] < 35)].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'] == int(year_begin))].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif vhi_extremum:
if vhi_extremum[0] == 'max':
return df[df['vhi'] == df.groupby('year')['vhi'].transform('max')].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[df['vhi'] == df.groupby('year')['vhi'].transform('min')].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif vhi_correlation:
if int(vhi_correlation) == 60:
return df[(df['vhi'] > 60)].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 40:
return df[(df['vhi'] < 40)].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 15:
return df[(df['vhi'] < 15)].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['vhi'] < 35)].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif year_end:
if year_begin:
return df[df['year'].between(int(year_begin), int(year_end))].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[df['year'] == int(year_end)].drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df.drop(['vhi', 'vci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif vhi_rate:
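            # Percentile view: a row is kept while the summed share of the columns
            # above the selected vhi_rate percentile stays below 15.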
df = self.plot_maker_vh()
if year_begin:
if vhi_rate and not year_end:
if vhi_rate == '0':
return df[(df['year'] == int(year_begin)) & (df['5%'] + df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '5':
return df[(df['year'] == int(year_begin)) & (df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '10':
return df[(df['year'] == int(year_begin)) & (df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '15':
return df[(df['year'] == int(year_begin)) & (df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '20':
return df[(df['year'] == int(year_begin)) & (df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '25':
return df[(df['year'] == int(year_begin)) & (df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '30':
return df[(df['year'] == int(year_begin)) & (df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '35':
return df[(df['year'] == int(year_begin)) & (df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '40':
return df[(df['year'] == int(year_begin)) & (df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '45':
return df[(df['year'] == int(year_begin)) & (df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '50':
return df[(df['year'] == int(year_begin)) & (df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '55':
return df[(df['year'] == int(year_begin)) & (df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '60':
return df[(df['year'] == int(year_begin)) & (df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '65':
return df[(df['year'] == int(year_begin)) & (df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '70':
return df[(df['year'] == int(year_begin)) & (df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '75':
return df[(df['year'] == int(year_begin)) & (df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '80':
return df[(df['year'] == int(year_begin)) & (df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '85':
return df[(df['year'] == int(year_begin)) & (df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '90':
return df[(df['year'] == int(year_begin)) & (df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
                    elif vhi_rate == '95':
                        return df[(df['year'] == int(year_begin)) & (df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
                    elif vhi_rate == '100':
                        return df[(df['year'] == int(year_begin)) & (df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
else:
return df.plot.area(x='year', figsize=(11, 6))
elif vhi_rate and year_end:
if vhi_rate == '0':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['5%'] + df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '5':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '10':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '15':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '20':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '25':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '30':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '35':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '40':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '45':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '50':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '55':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '60':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '65':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '70':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '75':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '80':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '85':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '90':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
                    elif vhi_rate == '95':
                        return df[(df['year'].between(int(year_begin), int(year_end))) & (df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
                    elif vhi_rate == '100':
                        return df[(df['year'].between(int(year_begin), int(year_end))) & (df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
else:
return df.plot.area(x='year', figsize=(11, 6))
            elif vhi_rate and not year_begin:
                # No year filter: keep the rows where the summed share of the percentile
                # columns above the selected vhi_rate value stays below 15.
                percent_cols = ['5%', '10%', '15%', '20%', '25%', '30%', '35%', '40%', '45%', '50%',
                                '55%', '60%', '65%', '70%', '75%', '80%', '85%', '90%', '95%', '100%']
                if vhi_rate == '0':
                    tail_cols = percent_cols
                elif vhi_rate + '%' in percent_cols:
                    tail_cols = percent_cols[percent_cols.index(vhi_rate + '%') + 1:] or ['100%']
                else:
                    return df.plot.area(x='year', figsize=(11, 6))
                return df[df[tail_cols].sum(axis=1) < 15].plot.area(x='year', figsize=(11, 6))
elif vhi_mild and not vhi_rate:
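            # Percentile view: a row is kept while the summed share of the columns
            # above the selected vhi_mild percentile stays below 35.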
df = self.plot_maker_vh()
if year_begin:
if vhi_mild and not year_end:
if vhi_mild == '0':
return df[(df['year'] == int(year_begin)) & (df['5%'] + df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '5':
return df[(df['year'] == int(year_begin)) & (df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '10':
return df[(df['year'] == int(year_begin)) & (df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '15':
return df[(df['year'] == int(year_begin)) & (df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '20':
return df[(df['year'] == int(year_begin)) & (df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '25':
return df[(df['year'] == int(year_begin)) & (df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '30':
return df[(df['year'] == int(year_begin)) & (df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '35':
return df[(df['year'] == int(year_begin)) & (df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '40':
return df[(df['year'] == int(year_begin)) & (df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '45':
return df[(df['year'] == int(year_begin)) & (df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '50':
return df[(df['year'] == int(year_begin)) & (df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '55':
return df[(df['year'] == int(year_begin)) & (df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '60':
return df[(df['year'] == int(year_begin)) & (df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '65':
return df[(df['year'] == int(year_begin)) & (df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '70':
return df[(df['year'] == int(year_begin)) & (df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '75':
return df[(df['year'] == int(year_begin)) & (df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '80':
return df[(df['year'] == int(year_begin)) & (df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '85':
return df[(df['year'] == int(year_begin)) & (df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '90':
return df[(df['year'] == int(year_begin)) & (df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
                    elif vhi_mild == '95':
                        return df[(df['year'] == int(year_begin)) & (df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
                    elif vhi_mild == '100':
                        return df[(df['year'] == int(year_begin)) & (df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
else:
return df.plot.area(x='year', figsize=(11, 6))
elif vhi_mild and year_end:
if vhi_mild == '0':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['5%'] + df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '5':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '10':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '15':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '20':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '25':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '30':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '35':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '40':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '45':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '50':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '55':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '60':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '65':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '70':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '75':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '80':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '85':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '90':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
                    elif vhi_mild == '95':
                        return df[(df['year'].between(int(year_begin), int(year_end))) & (df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
                    elif vhi_mild == '100':
                        return df[(df['year'].between(int(year_begin), int(year_end))) & (df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
else:
return df.plot.area(x='year', figsize=(11, 6))
            elif vhi_mild and not year_begin:
                # No year filter: keep the rows where the summed share of the percentile
                # columns above the selected vhi_mild value stays below 35.
                percent_cols = ['5%', '10%', '15%', '20%', '25%', '30%', '35%', '40%', '45%', '50%',
                                '55%', '60%', '65%', '70%', '75%', '80%', '85%', '90%', '95%', '100%']
                if vhi_mild == '0':
                    tail_cols = percent_cols
                elif vhi_mild + '%' in percent_cols:
                    tail_cols = percent_cols[percent_cols.index(vhi_mild + '%') + 1:] or ['100%']
                else:
                    return df.plot.area(x='year', figsize=(11, 6))
                return df[df[tail_cols].sum(axis=1) < 35].plot.area(x='year', figsize=(11, 6))
def vci_plot(self, params):
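        """Build an area plot for the VCI view.

        Same parameter handling as tci_plot, except the weekly view drops the
        vhi and tci columns so the vci series is plotted.
        """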
year_begin = params['year_begin']
year_end = params['year_end']
weeks = params['weeks']
vhi_extremum = params['vhi_extremum']
vhi_correlation = params['vhi_correlation']
vhi_rate = params['vhi_rate']
vhi_mild = params['vhi_mild']
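        # Normalise the requested range so that year_begin <= year_end.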
if year_begin and year_end:
            if year_end < year_begin:
                year_begin, year_end = year_end, year_begin
if not vhi_rate and not vhi_mild:
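            # Weekly index view: vhi and tci are dropped before plotting so the vci column is shown.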
df = self.plot_maker_vhi()
if year_begin:
if weeks and not year_end:
return df[(df['year'] == int(year_begin)) & (df['week'] == int(weeks))].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif vhi_extremum and not year_end:
if vhi_extremum[0] == 'max':
return df[(df['year'] == int(year_begin)) & (df['vhi'] == df.groupby('year')['vhi'].transform('max'))].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'] == int(year_begin)) & (df['vhi'] == df.groupby('year')['vhi'].transform('min'))].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif year_end:
if vhi_extremum:
if vhi_extremum[0] == 'max':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] == df.groupby('year')['vhi'].transform('max'))].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] == df.groupby('year')['vhi'].transform('min'))].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif weeks:
if vhi_correlation and year_end and weeks:
if int(vhi_correlation) == 60:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks)) & (df['vhi'] > 60)].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 40:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks)) & (df['vhi'] < 40)].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 15:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks)) & (df['vhi'] < 15)].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks)) & (df['vhi'] < 35)].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['week'] == int(weeks))].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif vhi_correlation and year_end:
if int(vhi_correlation) == 60:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] > 60)].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 40:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] < 40)].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 15:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] < 15)].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['vhi'] < 35)].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[df['year'].between(int(year_begin), int(year_end))].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif vhi_correlation and not year_end:
if int(vhi_correlation) == 60:
return df[(df['year'] == int(year_begin)) & (df['vhi'] > 60)].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 40:
return df[(df['year'] == int(year_begin)) & (df['vhi'] < 40)].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 15:
return df[(df['year'] == int(year_begin)) & (df['vhi'] < 15)].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'] == int(year_begin)) & (df['vhi'] < 35)].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['year'] == int(year_begin))].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif vhi_extremum:
if vhi_extremum[0] == 'max':
return df[df['vhi'] == df.groupby('year')['vhi'].transform('max')].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[df['vhi'] == df.groupby('year')['vhi'].transform('min')].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif vhi_correlation:
if int(vhi_correlation) == 60:
return df[(df['vhi'] > 60)].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 40:
return df[(df['vhi'] < 40)].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif int(vhi_correlation) == 15:
return df[(df['vhi'] < 15)].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[(df['vhi'] < 35)].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif year_end:
if year_begin:
return df[df['year'].between(int(year_begin), int(year_end))].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df[df['year'] == int(year_end)].drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
else:
return df.drop(['vhi', 'tci'], axis=1).plot.area(x='year', figsize=(11, 6))
elif vhi_rate:
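            # Percentile view: a row is kept while the summed share of the columns
            # above the selected vhi_rate percentile stays below 15.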
df = self.plot_maker_vh()
if year_begin:
if vhi_rate and not year_end:
if vhi_rate == '0':
return df[(df['year'] == int(year_begin)) & (df['5%'] + df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '5':
return df[(df['year'] == int(year_begin)) & (df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '10':
return df[(df['year'] == int(year_begin)) & (df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '15':
return df[(df['year'] == int(year_begin)) & (df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '20':
return df[(df['year'] == int(year_begin)) & (df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '25':
return df[(df['year'] == int(year_begin)) & (df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '30':
return df[(df['year'] == int(year_begin)) & (df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '35':
return df[(df['year'] == int(year_begin)) & (df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '40':
return df[(df['year'] == int(year_begin)) & (df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '45':
return df[(df['year'] == int(year_begin)) & (df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '50':
return df[(df['year'] == int(year_begin)) & (df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '55':
return df[(df['year'] == int(year_begin)) & (df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '60':
return df[(df['year'] == int(year_begin)) & (df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '65':
return df[(df['year'] == int(year_begin)) & (df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '70':
return df[(df['year'] == int(year_begin)) & (df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '75':
return df[(df['year'] == int(year_begin)) & (df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '80':
return df[(df['year'] == int(year_begin)) & (df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '85':
return df[(df['year'] == int(year_begin)) & (df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '90':
return df[(df['year'] == int(year_begin)) & (df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
                    elif vhi_rate == '95':
                        return df[(df['year'] == int(year_begin)) & (df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
                    elif vhi_rate == '100':
                        return df[(df['year'] == int(year_begin)) & (df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
else:
return df.plot.area(x='year', figsize=(11, 6))
elif vhi_rate and year_end:
if vhi_rate == '0':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['5%'] + df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '5':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '10':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '15':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '20':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '25':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '30':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '35':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '40':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '45':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '50':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '55':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '60':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '65':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '70':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '75':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '80':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '85':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '90':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
                    elif vhi_rate == '95':
                        return df[(df['year'].between(int(year_begin), int(year_end))) & (df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
                    elif vhi_rate == '100':
                        return df[(df['year'].between(int(year_begin), int(year_end))) & (df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
else:
return df.plot.area(x='year', figsize=(11, 6))
elif vhi_rate and not year_begin:
if vhi_rate == '0':
return df[(df['year']) & (df['5%'] + df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '5':
return df[(df['year']) & (df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '10':
return df[(df['year']) & (df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '15':
return df[(df['year']) & (df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '20':
return df[(df['year']) & (df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '25':
return df[(df['year']) & (df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '30':
return df[(df['year']) & (df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '35':
return df[(df['year']) & (df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '40':
return df[(df['year']) & (df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '45':
return df[(df['year']) & (df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '50':
return df[(df['year']) & (df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '55':
return df[(df['year']) & (df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '60':
return df[(df['year']) & (df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '65':
return df[(df['year']) & (df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '70':
return df[(df['year']) & (df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '75':
return df[(df['year']) & (df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '80':
return df[(df['year']) & (df['85%'] + df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '85':
return df[(df['year']) & (df['90%'] + df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '90':
return df[(df['year']) & (df['95%'] + df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
elif vhi_rate == '95':
                    return df[(df['year']) & (df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
                elif vhi_rate == '100':
                    return df[(df['year']) & (df['100%'] < 15)].plot.area(x='year', figsize=(11, 6))
else:
return df.plot.area(x='year', figsize=(11, 6))
elif vhi_mild and not vhi_rate:
df = self.plot_maker_vh()
if year_begin:
if vhi_mild and not year_end:
if vhi_mild == '0':
return df[(df['year'] == int(year_begin)) & (df['5%'] + df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '5':
return df[(df['year'] == int(year_begin)) & (df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '10':
return df[(df['year'] == int(year_begin)) & (df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '15':
return df[(df['year'] == int(year_begin)) & (df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '20':
return df[(df['year'] == int(year_begin)) & (df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '25':
return df[(df['year'] == int(year_begin)) & (df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '30':
return df[(df['year'] == int(year_begin)) & (df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '35':
return df[(df['year'] == int(year_begin)) & (df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '40':
return df[(df['year'] == int(year_begin)) & (df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '45':
return df[(df['year'] == int(year_begin)) & (df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '50':
return df[(df['year'] == int(year_begin)) & (df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '55':
return df[(df['year'] == int(year_begin)) & (df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '60':
return df[(df['year'] == int(year_begin)) & (df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '65':
return df[(df['year'] == int(year_begin)) & (df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '70':
return df[(df['year'] == int(year_begin)) & (df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '75':
return df[(df['year'] == int(year_begin)) & (df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '80':
return df[(df['year'] == int(year_begin)) & (df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '85':
return df[(df['year'] == int(year_begin)) & (df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '90':
return df[(df['year'] == int(year_begin)) & (df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '95':
                        return df[(df['year'] == int(year_begin)) & (df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
                    elif vhi_mild == '100':
                        return df[(df['year'] == int(year_begin)) & (df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
else:
return df.plot.area(x='year', figsize=(11, 6))
elif vhi_mild and year_end:
if vhi_mild == '0':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['5%'] + df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '5':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '10':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '15':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '20':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '25':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '30':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '35':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '40':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '45':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '50':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '55':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '60':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '65':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '70':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '75':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '80':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '85':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '90':
return df[(df['year'].between(int(year_begin), int(year_end))) & (df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '95':
                        return df[(df['year'].between(int(year_begin), int(year_end))) & (df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
                    elif vhi_mild == '100':
                        return df[(df['year'].between(int(year_begin), int(year_end))) & (df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
else:
return df.plot.area(x='year', figsize=(11, 6))
elif vhi_mild and not year_begin:
if vhi_mild == '0':
return df[(df['year']) & (df['5%'] + df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '5':
return df[(df['year']) & (df['10%'] + df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '10':
return df[(df['year']) & (df['15%'] + df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '15':
return df[(df['year']) & (df['20%'] + df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '20':
return df[(df['year']) & (df['25%'] + df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '25':
return df[(df['year']) & (df['30%'] + df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '30':
return df[(df['year']) & (df['35%'] + df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '35':
return df[(df['year']) & (df['40%'] + df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '40':
return df[(df['year']) & (df['45%'] + df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '45':
return df[(df['year']) & (df['50%'] + df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '50':
return df[(df['year']) & (df['55%'] + df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '55':
return df[(df['year']) & (df['60%'] + df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '60':
return df[(df['year']) & (df['65%'] + df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '65':
return df[(df['year']) & (df['70%'] + df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '70':
return df[(df['year']) & (df['75%'] + df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '75':
return df[(df['year']) & (df['80%'] + df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '80':
return df[(df['year']) & (df['85%'] + df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '85':
return df[(df['year']) & (df['90%'] + df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '90':
return df[(df['year']) & (df['95%'] + df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
elif vhi_mild == '95':
                    return df[(df['year']) & (df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
                elif vhi_mild == '100':
                    return df[(df['year']) & (df['100%'] < 35)].plot.area(x='year', figsize=(11, 6))
else:
return df.plot.area(x='year', figsize=(11, 6))
if __name__ == '__main__':
socket = SpyreServer()
socket.launch(host='0.0.0.0', port=8888)
``` |
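The branch ladder above repeats one nearly identical filter per threshold value; only the set of percentage columns being summed and the cut-off (15 in the vhi_rate branches, 35 in the vhi_mild branches) change. A minimal sketch of the same selection driven by the column labels (the helper names are illustrative and assume the '5%' through '100%' columns used above):

```python
# Sketch only: collapse the per-threshold branches into one helper.
# Column names ('5%' ... '100%') and cut-offs (15 / 35) follow the code above.
def threshold_mask(df, threshold, limit):
    # Sum every column strictly above the threshold; for '100' fall back to the
    # '100%' column, which mirrors the original '100' branch.
    cols = [f'{p}%' for p in range(int(threshold) + 5, 105, 5)] or ['100%']
    return df[cols].sum(axis=1) < limit

def filtered_plot(df, threshold, limit, year_begin=None, year_end=None):
    mask = threshold_mask(df, threshold, limit)
    if year_begin and year_end:
        mask &= df['year'].between(int(year_begin), int(year_end))
    elif year_begin:
        mask &= df['year'] == int(year_begin)
    return df[mask].plot.area(x='year', figsize=(11, 6))
```

Each elif in the handler would then reduce to a single call such as `filtered_plot(df, vhi_rate, 15, year_begin, year_end)`.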
{
"source": "4very/mplus_leaderboard",
"score": 2
} |
#### File: mplus_leaderboard/data/updateGuild.py
```python
from time import strftime
from json import dump, load
from logging import root
from os.path import join, exists
from os import mkdir
from datetime import datetime, timedelta
from requests.api import get
import pytz
from WOW import WOW_getGuildRoster
from updateMeta import NumberToClassName, getColorForRunScore, getColorForScore, NumberToClassColor
from RIO import RIO_GetCharData, RIO_GetCharRankings, RIO_GetRecentRuns
from updatePage import AddScoreColors, AddTimeAndPercDiff
from updateMeta import updateTimeFile
def UpdateGuildRoster(rosterfile):
guildData = WOW_getGuildRoster()
writeObj = {}
for member in guildData:
char = member['character']
if char['level'] == 60:
name = char['name']
realm = char['realm']['slug']
rio_data = RIO_GetCharRankings(name,realm)
try:
rio_link = rio_data['profile_url']
rio_score = rio_data['mythic_plus_scores_by_season'][0]['scores']['all']
rio_scoreColor = getColorForScore(rio_score)
except:
rio_link = None
rio_score = 0
rio_scoreColor = '#ffffff'
writeObj[char['id']] = {
'name': name,
'realm': realm,
'faction': 'horde' if char['realm']['slug'] == 'illidan' else 'alliance',
'class': char['playable_class']['id'],
'className': NumberToClassName[char['playable_class']['id']],
'classColor': NumberToClassColor[char['playable_class']['id']],
'race': char['playable_race']['id'],
'rank': member['rank'],
'score': rio_score,
'scoreColor': rio_scoreColor,
'links': {
'rio': rio_link,
'armory': f'https://worldofwarcraft.com/en-us/character/us/{realm}/{name}',
'wcl': f'https://www.warcraftlogs.com/character/us/{realm}/{name}',
'rbot': f'https://www.raidbots.com/simbot/quick?region=us&realm={realm}&name={name}'
}
}
with open(rosterfile,'w') as f:
dump(writeObj, f, indent=2)
root.info("updated guild roster")
def UpdateGuildRuns(folder, roster, startDate, endDate):
rosterData = getRosterData(roster)
runData = getAllRuns(rosterData)
runData = removeInvalid(runData, startDate, endDate)
AddScoreColors(runData)
AddTimeAndPercDiff(runData)
runsFile = join(folder, 'runs.json')
if exists(runsFile):
with open(runsFile,'r') as f:
oldRunData = load(f)
else:
oldRunData = {'data': {}}
oldRunData['data'] = {**oldRunData['data'], **runData}
with open(join(folder,'runs.json'),'w') as f:
dump(oldRunData, f, indent=2)
updateTimeFile(folder)
return
def getRosterData(rosterFile):
with open(rosterFile, 'r') as f:
return load(f)
def getAllRuns(rosterData):
retval = {}
for id, member in rosterData.items():
playerData = RIO_GetRecentRuns(member['name'], member['realm'])
for runId, run in playerData.items():
if runId in retval.keys():
retval[runId]['members'].append(id)
retval[runId]['count'] += 1
else:
retval[runId] = run
retval[runId]['members'] = [id]
retval[runId]['count'] = 1
return retval
def removeInvalid(runs, start, end):
retval = {}
for runId, runData in runs.items():
runDate = datetime.strptime(runData['dateCompleted'], "%Y-%m-%dT%H:%M:%S.000Z")
startDate = datetime.utcfromtimestamp(start)
endDate = datetime.utcfromtimestamp(end)
if startDate < runDate and runDate < endDate and runData['count'] >= 2:
retval[runId] = runData
return retval
def suffix(d):
return 'th' if 11<=d<=13 else {1:'st',2:'nd',3:'rd'}.get(d%10, 'th')
def PrepFolder(folder,start,end,weekNum):
if not exists(folder):
mkdir(folder)
metaFile = join(folder,'meta.json')
if exists(metaFile): return
start = datetime.utcfromtimestamp(start) - timedelta(hours=5)
end = datetime.utcfromtimestamp(end) - timedelta(hours=5)
with open(metaFile, 'w') as f:
dump({
'num': weekNum,
#11 am, Tuesday August 17th
'start': start.strftime(f'%-I %P, %A %B %-d{suffix(start.day)}'),
'end': end.strftime(f'%-I %P, %A %B %-d{suffix(end.day)}'),
},f, indent=2)
def updateGuildMeta(folder, weeknumber):
with open(join(folder,'meta.json'),'w') as f:
dump({
'weekNum' : weeknumber
}, f)
return
``` |
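A minimal driver sketch for the functions above; the paths, week number and epoch timestamps for the reset window are placeholders, not values taken from the original project:

```python
# Hypothetical driver: refresh the roster, then collect runs for one weekly window.
from datetime import datetime, timezone

roster_file = 'data/roster.json'
week_folder = 'data/week_01'
week_start = int(datetime(2021, 8, 17, 15, 0, tzinfo=timezone.utc).timestamp())
week_end = week_start + 7 * 24 * 3600

UpdateGuildRoster(roster_file)
PrepFolder(week_folder, week_start, week_end, weekNum=1)
UpdateGuildRuns(week_folder, roster_file, week_start, week_end)
```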
{
"source": "4x1md/vx8_aprs_parser",
"score": 2
} |
#### File: src/data_types/metadata.py
```python
class Metadata:
def __init__(self, raw_data):
# Date and time
self.date = raw_data[0:2]
self.time = raw_data[4:5]
# Packet sender
self.sender_callsign = raw_data[8:15]
self.sender_ssid = raw_data[17] + raw_data[19]
self.ascii_callsign = True if (ord(raw_data[18]) & 0b00010000) else False
# Data length
# If packet length is 0xFFFF, it is actually 0
self.pkt_len = ord(raw_data[20]) * 0x100 + ord(raw_data[21])
self.pkt_len = 0 if self.pkt_len == 0xFFFF else self.pkt_len
# Memory cell stores packet (1 if true, 0 if false)
# If in use value is 0xFFFF, it is actually 0
self.in_use = ord(raw_data[22]) * 0x100 + ord(raw_data[23])
self.in_use = True if self.in_use == 1 else False
def __repr__(self):
return '<Metadata callsign=%s-%s, ascii=%s, pkt_len=%s, in_use=%s>' % \
(self.sender_callsign, self.sender_ssid, self.ascii_callsign, self.pkt_len, self.in_use)
``` |
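A quick way to exercise the parser with a synthetic record; the byte layout below is fabricated only to satisfy the offsets read in `__init__` and is not a real VX-8 memory dump:

```python
# Fabricated 32-byte record (placeholder values at every offset the parser reads).
raw = bytearray(32)
raw[8:15] = b'4X1MD  '     # sender callsign field
raw[17:20] = b'0\x10 '     # SSID characters; bit 4 of byte 18 marks an ASCII callsign
raw[20:22] = b'\x00\x40'   # packet length = 0x0040 = 64 bytes
raw[22:24] = b'\x00\x01'   # in-use flag = 1
meta = Metadata(raw.decode('latin-1'))
print(meta)                # pkt_len=64, in_use=True, ascii_callsign=True
```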
{
"source": "4xin/scrapy-study",
"score": 3
} |
#### File: HourseSpider/spiders/cnspiders.py
```python
import scrapy
from HourseSpider.items import DichanItem
class HourseSpider(scrapy.Spider):
name = "HourseSpider"
allowed_domains = ["cnfce.net"]
def __init__(self, category=None, *args, **kwargs):
super(HourseSpider, self).__init__(*args, **kwargs)
self.start_urls = ['http://www.cnfce.net/sale/page1.html']
def parse(self, response):
data = response.xpath('//div[@id="mainn"]/ul[re:test(@class,"ul[0-1]$")]')
for sel in data:
item = DichanItem()
item['area']= sel.xpath('li[re:test(@class,"lifw[0-1]$")]/a/text()').extract()
item['section']= sel.xpath('li[re:test(@class,"lidd[0-1]$")]/a/text()').extract()
item['type']= sel.xpath('li[re:test(@class,"lifx[0-1]$")]/a/text()').extract()
item['floor']= sel.xpath('li[re:test(@class,"lilc[0-1]$")]/a/text()').extract()
item['roomtype']= sel.xpath('li[re:test(@class,"lihx[0-1]$")]/a/text()').extract()
item['decorate']= sel.xpath('li[re:test(@class,"lizx[0-1]$")]/a/text()').extract()
item['acreage']= sel.xpath('li[re:test(@class,"limj[0-1]$")]/a/text()').extract()
item['unit_price']= sel.xpath('li[re:test(@class,"lidj[0-1]$")]/a/text()').extract()
item['price']= sel.xpath('li[re:test(@class,"lijg[0-1]$")]/a/text()').extract()
item['date']= sel.xpath('li[re:test(@class,"lirq[0-1]$")]/a/text()').extract()
yield item
``` |
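The spider can be run from Python as well as with `scrapy crawl`; the feed file name is arbitrary and the import path assumes the project layout implied by the file header:

```python
# Hypothetical runner; `scrapy crawl HourseSpider -o hourses.json` does the same via the CLI.
from scrapy.crawler import CrawlerProcess
from HourseSpider.spiders.cnspiders import HourseSpider

process = CrawlerProcess(settings={'FEEDS': {'hourses.json': {'format': 'json'}}})
process.crawl(HourseSpider)
process.start()
```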
{
"source": "4xle/Blender-Addon-Photogrammetry-Importer",
"score": 2
} |
#### File: photogrammetry_importer/file_handlers/point_data_file_handler.py
```python
import os
from photogrammetry_importer.types.point import Point
from photogrammetry_importer.file_handlers.ply_file_handler import PLYFileHandler
from photogrammetry_importer.utility.blender_logging_utility import log_report
class DataSemantics(object):
def __init__(self):
self.x_idx = None
self.y_idx = None
self.z_idx = None
self.r_idx = None
self.g_idx = None
self.b_idx = None
self.pseudo_color = None
def is_color_initialized(self):
return not None in [self.r_idx, self.g_idx, self.b_idx]
def is_int(some_str):
try:
int(some_str)
return True
except ValueError:
return False
def is_float(some_str):
try:
float(some_str)
return True
except ValueError:
return False
class PointDataFileHandler(object):
@staticmethod
def read_lines_as_tuples(ifc, delimiter):
lines_as_tup = []
for line in ifc.readlines():
elements = line.split(delimiter)
lines_as_tup.append(elements)
return lines_as_tup
@staticmethod
def guess_data_semantics(data_tuple):
log_report('INFO', 'Guessing data semantics')
data_semantics = DataSemantics()
# Data must start with subsequent float values
# representing the three-dimensional position
for idx in [0, 1, 2]:
assert is_float(data_tuple[idx])
data_semantics.x_idx = 0
data_semantics.y_idx = 1
data_semantics.z_idx = 2
num_data_entries = len(data_tuple)
# Search for three subsequent int values between 0 and 255
# (which indicate RGB color information)
        for idx in range(3, num_data_entries - 2):
if not is_int(data_tuple[idx]):
continue
if not is_int(data_tuple[idx + 1]):
continue
if not is_int(data_tuple[idx + 2]):
continue
if not 0 <= int(data_tuple[idx]) <= 255:
continue
            if not 0 <= int(data_tuple[idx + 1]) <= 255:
                continue
            if not 0 <= int(data_tuple[idx + 2]) <= 255:
continue
data_semantics.r_idx = idx
data_semantics.g_idx = idx + 1
data_semantics.b_idx = idx + 2
data_semantics.pseudo_color = False
break
if data_semantics.is_color_initialized():
return data_semantics
        # If no int values are found, we assume that the color information
# is stored as pseudo colors, i.e. we are looking for 3 subsequent
# floats between (0,1).
        for idx in range(3, num_data_entries - 2):
assert is_float(data_tuple[idx])
if not 0 <= float(data_tuple[idx]) <= 1:
continue
if not 0 <= float(data_tuple[idx+1]) <= 1:
continue
if not 0 <= float(data_tuple[idx+2]) <= 1:
continue
data_semantics.r_idx = idx
data_semantics.g_idx = idx + 1
data_semantics.b_idx = idx + 2
data_semantics.pseudo_color = True
break
return data_semantics
@staticmethod
def parse_header(line):
data_semantics = DataSemantics()
data_tuple = line.lstrip('//').rstrip().split(' ')
for idx, val in enumerate(data_tuple):
if val == 'X':
data_semantics.x_idx = idx
elif val == 'Y':
data_semantics.y_idx = idx
elif val == 'Z':
data_semantics.z_idx = idx
elif val == 'R':
data_semantics.r_idx = idx
data_semantics.pseudo_color = False
elif val == 'G':
data_semantics.g_idx = idx
data_semantics.pseudo_color = False
elif val == 'B':
data_semantics.b_idx = idx
data_semantics.pseudo_color = False
elif val == 'Rf':
data_semantics.r_idx = idx
data_semantics.pseudo_color = True
elif val == 'Gf':
data_semantics.g_idx = idx
data_semantics.pseudo_color = True
elif val == 'Bf':
data_semantics.b_idx = idx
data_semantics.pseudo_color = True
return data_semantics
@staticmethod
def parse_asc_or_pts_or_csv(ifp, delimiter, only_data):
points = []
with open(ifp, 'r') as ifc:
data_semantics = None
if not only_data:
line = ifc.readline()
if line.startswith('//'):
data_semantics = PointDataFileHandler.parse_header(line)
line = ifc.readline()
num_points = int(line.strip())
else:
num_points = int(line.strip())
lines_as_tuples = PointDataFileHandler.read_lines_as_tuples(
ifc, delimiter=delimiter)
if data_semantics is None:
# Determine the semantics of the data
data_tuple = lines_as_tuples[0]
data_semantics = PointDataFileHandler.guess_data_semantics(
data_tuple)
if data_semantics.pseudo_color:
factor = 255
else:
factor = 1
for idx, data_tuple in enumerate(lines_as_tuples):
point = Point(
coord=[
float(data_tuple[data_semantics.x_idx]),
float(data_tuple[data_semantics.y_idx]),
float(data_tuple[data_semantics.z_idx])],
color=[
int(factor * float(data_tuple[data_semantics.r_idx])),
int(factor * float(data_tuple[data_semantics.g_idx])),
int(factor * float(data_tuple[data_semantics.b_idx]))],
id=idx,
scalars=None)
points.append(point)
return points
@staticmethod
def parse_point_data_file(ifp):
log_report('INFO', 'Parse Point Data File: ...')
ext = os.path.splitext(ifp)[1].lower()
if ext == '.ply':
points = PLYFileHandler.parse_ply_file(ifp)
elif ext == '.csv':
points = PointDataFileHandler.parse_asc_or_pts_or_csv(
ifp, delimiter=',', only_data=True)
elif ext in ['.asc', '.pts']:
# https://www.cloudcompare.org/doc/wiki/index.php?title=FILE_I/O
points = PointDataFileHandler.parse_asc_or_pts_or_csv(
ifp, delimiter=' ', only_data=False)
else:
            log_report('ERROR', 'Extension ' + ext + ' not supported.')
assert False
log_report('INFO', 'Parse Point Data File: Done')
return points
```
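A short usage sketch for the entry point above; the path is a placeholder and the Point fields are assumed to mirror the constructor arguments used earlier:

```python
# Hypothetical call site outside the Blender operator.
points = PointDataFileHandler.parse_point_data_file('/path/to/cloud.ply')
print(len(points), 'points parsed')
if points:
    print(points[0].coord, points[0].color)
```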
#### File: photogrammetry_importer/properties/camera_import_properties.py
```python
import bpy
from bpy.props import (StringProperty,
BoolProperty,
EnumProperty,
FloatProperty,
IntProperty,
FloatVectorProperty)
from photogrammetry_importer.utility.blender_utility import adjust_render_settings_if_possible
from photogrammetry_importer.utility.blender_camera_utility import principal_points_initialized
from photogrammetry_importer.utility.blender_camera_utility import set_principal_point_for_cameras
from photogrammetry_importer.utility.blender_camera_utility import add_cameras
from photogrammetry_importer.utility.blender_camera_utility import add_camera_animation
from photogrammetry_importer.types.camera import Camera
class CameraImportProperties():
""" This class encapsulates Blender UI properties that are required to visualize the reconstructed cameras correctly. """
image_fp_items = [
(Camera.IMAGE_FP_TYPE_NAME, "File Name", "", 1),
(Camera.IMAGE_FP_TYPE_RELATIVE, "Relative Path", "", 2),
(Camera.IMAGE_FP_TYPE_ABSOLUTE, "Absolute Path", "", 3)
]
image_fp_type: EnumProperty(
name="Image File Path Type",
description = "Choose how image file paths are treated, i.e. absolute path, relative path or file name.",
items=image_fp_items)
image_dp: StringProperty(
name="Image Directory",
description = "Assuming the reconstruction result is located in <some/path/rec.ext> or <some/path/colmap_model>. " +
"The addons uses <some/path/images> (if available) or <some/path> as default image path." ,
default=""
# Can not use subtype='DIR_PATH' while importing another file (i.e. .nvm)
)
import_cameras: BoolProperty(
name="Import Cameras",
description = "Import Cameras",
default=True)
default_width: IntProperty(
name="Default Width",
description = "Width, which will be used used if corresponding image is not found.",
default=-1)
default_height: IntProperty(
name="Default Height",
description = "Height, which will be used used if corresponding image is not found.",
default=-1)
default_focal_length: FloatProperty(
name="Focal length in pixel",
description = "Value for missing focal length in LOG (Open3D) file. ",
default=float('nan'))
default_pp_x: FloatProperty(
name="Principal Point X Component",
description = "Principal Point X Component, which will be used if not contained in the NVM (VisualSfM) / LOG (Open3D) file. " + \
"If no value is provided, the principal point is set to the image center.",
default=float('nan'))
default_pp_y: FloatProperty(
name="Principal Point Y Component",
description = "Principal Point Y Component, which will be used if not contained in the NVM (VisualSfM) / LOG (Open3D) file. " + \
"If no value is provided, the principal point is set to the image center.",
default=float('nan'))
add_background_images: BoolProperty(
name="Add a Background Image for each Camera",
description = "The background image is only visible by viewing the scene from a specific camera.",
default=True)
add_image_planes: BoolProperty(
name="Add an Image Plane for each Camera",
description = "Add an Image Plane for each Camera - only for non-panoramic cameras.",
default=False)
add_image_plane_emission: BoolProperty(
name="Add Image Plane Color Emission",
description = "Add image plane color emission to increase the visibility of the image planes.",
default=True)
image_plane_transparency: FloatProperty(
name="Image Plane Transparency Value",
description = "Transparency value of the image planes: 0 = invisible, 1 = opaque.",
default=0.5,
min=0,
max=1)
add_depth_maps_as_point_cloud: BoolProperty(
name="Add Depth Maps (experimental)",
description = "Add the depth map (if available) as point cloud for each Camera",
default=False)
use_default_depth_map_color: BoolProperty(
name="Use Default Depth Map Color",
description = "If not selected, each depth map is colorized with a different (random) color.",
default=False)
depth_map_default_color: FloatVectorProperty(
name="Depth Map Color",
description="Depth map color.",
subtype='COLOR',
size=3, # RGBA colors are not compatible with the GPU Module
default=(0.0, 1.0, 0.0),
min=0.0,
max=1.0
)
depth_map_display_sparsity: IntProperty(
name="Depth Map Display Sparsity",
description = "Adjust the sparsity of the depth maps. A value of 10 means, " +
"that every 10th depth map value is converted to a 3D point.",
default=10)
add_camera_motion_as_animation: BoolProperty(
name="Add Camera Motion as Animation",
description = "Add an animation reflecting the camera motion. " +
" The order of the cameras is determined by the corresponding file name.",
default=True)
number_interpolation_frames: IntProperty(
name="Number of Frames Between two Reconstructed Cameras",
description = "The poses of the animated camera are interpolated.",
default=0,
min=0)
interpolation_items = [
("LINEAR", "LINEAR", "", 1),
("BEZIER", "BEZIER", "", 2),
("SINE", "SINE", "", 3),
("QUAD", "QUAD", "", 4),
("CUBIC", "CUBIC", "", 5),
("QUART", "QUART", "", 6),
("QUINT", "QUINT", "", 7),
("EXPO", "EXPO", "", 8),
("CIRC", "CIRC", "", 9),
("BACK", "BACK", "", 10),
("BOUNCE", "BOUNCE", "", 11),
("ELASTIC", "ELASTIC", "", 12),
("CONSTANT", "CONSTANT", "", 13)
]
interpolation_type: EnumProperty(
name="Interpolation Type",
description = "Blender string that defines the type of the interpolation.",
items=interpolation_items)
consider_missing_cameras_during_animation: BoolProperty(
name="Adjust Frame Numbers of Camera Animation",
description = "Assume there are three consecutive images A,B and C, but only A and C have been reconstructed. " +
"This option adjusts the frame number of C and the number of interpolation frames between camera A and C.",
default=True)
remove_rotation_discontinuities: BoolProperty(
name="Remove Rotation Discontinuities",
description = "The addon uses quaternions q to represent the rotation." +
"A quaternion q and its negative -q describe the same rotation. " +
"This option allows to remove different signs.",
default=True)
suppress_distortion_warnings: BoolProperty(
name="Suppress Distortion Warnings",
description = "Radial distortion might lead to incorrect alignments of cameras and points. " +
"Enable this option to suppress corresponding warnings. " +
"If possible, consider to re-compute the reconstruction using a camera model without radial distortion.",
default=False)
adjust_render_settings: BoolProperty(
name="Adjust Render Settings",
description = "Adjust the render settings according to the corresponding images. " +
"All images have to be captured with the same device). " +
"If disabled the visualization of the camera cone in 3D view might be incorrect.",
default=True)
camera_extent: FloatProperty(
name="Initial Camera Extent (in Blender Units)",
description = "Initial Camera Extent (Visualization)",
default=1)
def draw_camera_options(self,
layout,
draw_image_fp=True,
draw_depth_map_import=False,
draw_image_size=False,
draw_principal_point=False,
draw_focal_length=False,
draw_everything=False):
camera_box = layout.box()
if draw_image_fp or draw_everything:
camera_box.prop(self, "image_fp_type")
if self.image_fp_type == "NAME" or self.image_fp_type == "RELATIVE" or draw_everything:
camera_box.prop(self, "image_dp")
if draw_focal_length or draw_image_size or draw_principal_point or draw_everything:
image_box = camera_box.box()
if draw_focal_length or draw_everything:
image_box.prop(self, "default_focal_length")
if draw_image_size or draw_everything:
image_box.prop(self, "default_width")
image_box.prop(self, "default_height")
if draw_principal_point or draw_everything:
image_box.prop(self, "default_pp_x")
image_box.prop(self, "default_pp_y")
import_camera_box = camera_box.box()
import_camera_box.prop(self, "import_cameras")
if self.import_cameras or draw_everything:
import_camera_box.prop(self, "camera_extent")
import_camera_box.prop(self, "add_background_images")
image_plane_box = import_camera_box.box()
image_plane_box.prop(self, "add_image_planes")
if self.add_image_planes or draw_everything:
image_plane_box.prop(self, "add_image_plane_emission")
image_plane_box.prop(self, "image_plane_transparency")
if draw_depth_map_import or draw_everything:
depth_map_box = import_camera_box.box()
depth_map_box.prop(self, "add_depth_maps_as_point_cloud")
if self.add_depth_maps_as_point_cloud or draw_everything:
depth_map_box.prop(self, "use_default_depth_map_color")
if self.use_default_depth_map_color:
depth_map_box.prop(self, "depth_map_default_color")
depth_map_box.prop(self, "depth_map_display_sparsity")
box = camera_box.box()
box.prop(self, "add_camera_motion_as_animation")
if self.add_camera_motion_as_animation or draw_everything:
box.prop(self, "number_interpolation_frames")
box.prop(self, "interpolation_type")
box.prop(self, "consider_missing_cameras_during_animation")
box.prop(self, "remove_rotation_discontinuities")
camera_box.prop(self, "suppress_distortion_warnings")
camera_box.prop(self, "adjust_render_settings")
def enhance_camera_with_intrinsics(self, cameras):
# This function should be overwritten,
# if the intrinsic parameters are not part of the reconstruction data
# (e.g. log file)
success = True
return cameras, success
def enhance_camera_with_images(self, cameras):
# This function should be overwritten,
# if image size is not part of the reconstruction data
# (e.g. nvm file)
success = True
return cameras, success
def import_photogrammetry_cameras(self, cameras, parent_collection):
if self.import_cameras or self.add_camera_motion_as_animation:
cameras, success = self.enhance_camera_with_images(cameras)
if success:
cameras, success = self.enhance_camera_with_intrinsics(cameras)
if success:
# The principal point information may be provided in the reconstruction data
if not principal_points_initialized(cameras):
set_principal_point_for_cameras(
cameras,
self.default_pp_x,
self.default_pp_y,
self)
if self.adjust_render_settings:
adjust_render_settings_if_possible(
self,
cameras)
if self.import_cameras:
add_cameras(
self,
cameras,
parent_collection,
image_dp=self.image_dp,
add_background_images=self.add_background_images,
add_image_planes=self.add_image_planes,
add_depth_maps_as_point_cloud=self.add_depth_maps_as_point_cloud,
camera_scale=self.camera_extent,
image_plane_transparency=self.image_plane_transparency,
add_image_plane_emission=self.add_image_plane_emission,
use_default_depth_map_color=self.use_default_depth_map_color,
depth_map_default_color=self.depth_map_default_color,
depth_map_display_sparsity=self.depth_map_display_sparsity)
if self.add_camera_motion_as_animation:
add_camera_animation(
self,
cameras,
parent_collection,
self.number_interpolation_frames,
self.interpolation_type,
self.consider_missing_cameras_during_animation,
self.remove_rotation_discontinuities,
self.image_dp,
self.image_fp_type)
else:
return {'FINISHED'}
``` |
{
"source": "4ybaka/py-nrf24",
"score": 3
} |
#### File: src/nrf24/nrf24.py
```python
import pigpio
from enum import Enum, IntEnum
from os import environ as env
class RF24_PA(IntEnum):
MIN = 0,
LOW = 1,
HIGH = 2,
MAX = 3,
ERROR = 4
@staticmethod
def from_value(value):
if value is None:
raise ValueError(f'"None" is not a RF24_PA value.')
if isinstance(value, RF24_PA):
return value
elif isinstance(value, int):
for e in RF24_PA:
if value == e.value:
return e
raise ValueError(f'Value {value} is not a RF24_PA value.')
elif isinstance(value, str):
for e in RF24_PA:
if value.lower() == e.name.lower():
return e
raise ValueError(f'Value {value} is not a RF24_PA name.')
else:
raise ValueError(f'{value} ({type(value)}) is not a RF24_PA value.')
class RF24_DATA_RATE(IntEnum):
RATE_1MBPS = 0,
RATE_2MBPS = 1,
RATE_250KBPS = 2
@staticmethod
def from_value(value):
if value is None:
raise ValueError(f'"None" is not a RF24_DATA_RATE value.')
if isinstance(value, RF24_DATA_RATE):
return value
elif isinstance(value, int):
for e in RF24_DATA_RATE:
if value == e.value:
return e
raise ValueError(f'Value {value} is not a RF24_DATA_RATE value.')
elif isinstance(value, str):
for e in RF24_DATA_RATE:
if value.lower() == e.name.lower():
return e
raise ValueError(f'Value {value} is not a RF24_DATA_RATE name.')
else:
raise ValueError(f'{value} ({type(value)}) is not a RF24_DATA_RATE value.')
class RF24_CRC(IntEnum):
DISABLED = 0,
BYTES_1 = 1,
BYTES_2 = 2
@staticmethod
def from_value(value):
if value is None:
raise ValueError(f'"None" is not a RF24_CRC value.')
if isinstance(value, RF24_CRC):
return value
elif isinstance(value, int):
for e in RF24_CRC:
if value == e.value:
return e
raise ValueError(f'Value {value} is not a RF24_CRC value.')
elif isinstance(value, str):
for e in RF24_CRC:
if value.lower() == e.name.lower():
return e
raise ValueError(f'Value {value} is not a RF24_CRC name.')
else:
raise ValueError(f'{value} ({type(value)}) is not an RF24_CRC value.')
class RF24_PAYLOAD(IntEnum):
ACK = -1
DYNAMIC = 0
MIN = 1
MAX = 32
@staticmethod
def from_value(value):
if value is None:
raise ValueError(f'"None" is not a RF24_PAYLOAD value.')
if isinstance(value, RF24_PAYLOAD):
return value
elif isinstance(value, int):
for e in RF24_PAYLOAD:
if value == e.value:
return e
if value >= RF24_PAYLOAD.ACK and value <= RF24_PAYLOAD.MAX:
return value
raise ValueError(f'Value {value} is not a RF24_PAYLOAD value.')
elif isinstance(value, str):
for e in RF24_PAYLOAD:
if value.lower() == e.name.lower():
return e
raise ValueError(f'Value {value} is not a RF24_PAYLOAD name.')
else:
raise ValueError(f'{value} ({type(value)}) is not an RF24_PAYLOAD value.')
class SPI_CHANNEL(IntEnum):
MAIN_CE0 = 0
MAIN_CE1 = 1
AUX_CE0 = 2
AUX_CE1 = 3
AUX_CE2 = 4
@staticmethod
def from_value(value):
if value is None:
raise ValueError(f'"None" is not a SPI_CHANNEL value.')
if isinstance(value, SPI_CHANNEL):
return value
elif isinstance(value, int):
for e in SPI_CHANNEL:
if value == e.value:
return e
raise ValueError(f'Value {value} is not a SPI_CHANNEL value.')
elif isinstance(value, str):
            for e in SPI_CHANNEL:
if value.lower() == e.name.lower():
return e
raise ValueError(f'Value {value} is not a SPI_CHANNEL name.')
else:
raise ValueError(f'{value} ({type(value)}) is not an SPI_CHANNEL value.')
class RF24_RX_ADDR(IntEnum):
P0 = 0x0a,
P1 = 0x0b,
P2 = 0x0c,
P3 = 0x0d,
P4 = 0x0e,
P5 = 0x0f
class NRF24:
"""
Note that RX and TX addresses must match
Note that communication channels must match:
Note that payload size must match:
The following table describes how to configure the operational
modes.
+----------+--------+---------+--------+-----------------------------+
|Mode | PWR_UP | PRIM_RX | CE pin | FIFO state |
+----------+--------+---------+--------+-----------------------------+
|RX mode | 1 | 1 | 1 | --- |
+----------+--------+---------+--------+-----------------------------+
|TX mode | 1 | 0 | 1 | Data in TX FIFOs. Will empty|
| | | | | all levels in TX FIFOs |
+----------+--------+---------+--------+-----------------------------+
|TX mode | 1 | 0 | >10us | Data in TX FIFOs. Will empty|
| | | | pulse | one level in TX FIFOs |
+----------+--------+---------+--------+-----------------------------+
|Standby-II| 1 | 0 | 1 | TX FIFO empty |
+----------+--------+---------+--------+-----------------------------+
|Standby-I | 1 | --- | 0 | No ongoing transmission |
+----------+--------+---------+--------+-----------------------------+
|Power Down| 0 | --- | --- | --- |
+----------+--------+---------+--------+-----------------------------+
"""
TX = 0
RX = 1
@staticmethod
def from_config(config, pi=None, pigpio_section='pigpio', nrf24_section='nrf24'):
# Get pigpio configuration.
if pi is None:
host = config[pigpio_section].get('host', env.get('PIGPIO_HOST', 'localhost'))
port = config[pigpio_section].getint('port', env.get('PIGPIO_PORT', 8888))
pi = pigpio.pi(host, port)
# Get nrf24 configuration
ce_pin = config[nrf24_section].getint('ce_pin', 25)
spi_channel = SPI_CHANNEL.from_value(config[nrf24_section].get('spi_channel', SPI_CHANNEL.MAIN_CE0))
spi_speed = config[nrf24_section].getint('spi_speed', 50e4)
        data_rate = RF24_DATA_RATE.from_value(config[nrf24_section].get('data_rate', RF24_DATA_RATE.RATE_1MBPS))
channel = config[nrf24_section].getint('channel', 76)
payload_size = RF24_PAYLOAD.from_value(config[nrf24_section].get('payload_size', RF24_PAYLOAD.MAX))
address_bytes = config[nrf24_section].getint('address_size', 5)
        crc_bytes = RF24_CRC.from_value(config[nrf24_section].getint('crc_bytes', RF24_CRC.BYTES_2))
pad = config[nrf24_section].getint('pad', 32)
        nrf = NRF24(pi, ce=ce_pin, spi_channel=spi_channel, spi_speed=spi_speed, data_rate=data_rate, channel=channel, payload_size=payload_size, address_bytes=address_bytes, crc_bytes=crc_bytes, pad=pad)
if config[nrf24_section].get('tx_addr', None) is not None:
nrf.open_writing_pipe(config[nrf24_section].get('tx_addr', None))
if config[nrf24_section].get('rx_addr_0', None) is not None:
nrf.open_reading_pipe(0, config[nrf24_section].get('rx_addr_0', None))
        if config[nrf24_section].get('rx_addr_1', None) is not None:
nrf.open_reading_pipe(1, config[nrf24_section].get('rx_addr_1', None))
if config[nrf24_section].get('rx_addr_2', None) is not None:
nrf.open_reading_pipe(2, config[nrf24_section].get('rx_addr_2', None))
if config[nrf24_section].get('rx_addr_3', None) is not None:
nrf.open_reading_pipe(3, config[nrf24_section].get('rx_addr_3', None))
if config[nrf24_section].get('rx_addr_4', None) is not None:
nrf.open_reading_pipe(4, config[nrf24_section].get('rx_addr_4', None))
if config[nrf24_section].get('rx_addr_5', None) is not None:
nrf.open_reading_pipe(5, config[nrf24_section].get('rx_addr_5', None))
return nrf, pi
def __init__(self,
pi, # pigpio Raspberry PI connection
ce, # GPIO for chip enable
spi_channel=SPI_CHANNEL.MAIN_CE0, # SPI channel
                 spi_speed=50e3,                          # SPI speed in bps: 50,000 = 50 kHz
data_rate=RF24_DATA_RATE.RATE_1MBPS, # Default data rate is 1 Mbits.
channel=76, # Radio channel
payload_size=RF24_PAYLOAD.MAX, # Message size in bytes (default: 32)
address_bytes=5, # RX/TX address length in bytes
crc_bytes=RF24_CRC.BYTES_2, # Number of CRC bytes
pad=32 # Value used to pad short messages
):
"""
Instantiate with the Pi to which the card reader is connected.
Optionally the SPI channel may be specified. The default is
main SPI channel 0.
The following constants may be used to define the channel:
        SPI_CHANNEL.MAIN_CE0 - main SPI channel 0
        SPI_CHANNEL.MAIN_CE1 - main SPI channel 1
        SPI_CHANNEL.AUX_CE0 - aux SPI channel 0
        SPI_CHANNEL.AUX_CE1 - aux SPI channel 1
        SPI_CHANNEL.AUX_CE2 - aux SPI channel 2
"""
self._pi = pi
# Chip Enable can be any PIN (~).
assert 0 <= ce <= 31
self._ce_pin = ce
pi.set_mode(ce, pigpio.OUTPUT)
self.unset_ce()
# SPI Channel
assert spi_channel >= SPI_CHANNEL.MAIN_CE0 and spi_channel <= SPI_CHANNEL.AUX_CE2
# SPI speed between 32 KHz and 10 MHz
assert 32000 <= spi_speed <= 10e6
# Access SPI on the Raspberry PI.
if spi_channel < SPI_CHANNEL.AUX_CE0: # WAS: NRF24.SPI_AUX_CE0:
# Main SPI
self._spi_handle = pi.spi_open(spi_channel, int(spi_speed))
else:
# Aux SPI.
self._spi_handle = pi.spi_open(spi_channel - SPI_CHANNEL.AUX_CE0, int(spi_speed), NRF24._AUX_SPI)
# NRF data rate
self._data_rate = data_rate
self.set_data_rate(data_rate)
# NRF channel (0-125)
self._channel = 0
self.set_channel(channel)
# NRF Payload size. -1 = Acknowledgement payload, 0 = Dynamic payload size, 1 - 32 = Payload size in bytes.
self._payload_size = 0
self.set_payload_size(payload_size)
# NRF Address width in bytes. Shorter addresses will be padded using the padding above.
self._address_width = 5
self.set_address_bytes(address_bytes)
# NRF CRC bytes. Range 0 - 2.
self._crc_bytes = 1
self.set_crc_bytes(crc_bytes)
# Padding for messages and addresses.
self._padding = ord(' ')
self.set_padding(pad)
# NRF Power Tx
self._power_tx = 0
# Initialize NRF.
self.power_down()
self._nrf_write_reg(self.SETUP_RETR, 0b11111)
self.flush_rx()
self.flush_tx()
self.power_up_rx()
def set_channel(self, channel):
assert 0 <= channel <= 125
self._channel = channel # frequency (2400 + channel) MHz
self._nrf_write_reg(self.RF_CH, self._channel)
def set_payload_size(self, payload):
assert RF24_PAYLOAD.ACK <= payload <= RF24_PAYLOAD.MAX
self._payload_size = payload # 0 is dynamic payload
self._configure_payload()
def set_padding(self, pad):
try:
self._padding = ord(pad)
except:
self._padding = pad
assert 0 <= self._padding <= 255
def set_address_bytes(self, address_bytes):
assert 3 <= address_bytes <= 5
self._address_width = address_bytes
self._nrf_write_reg(self.SETUP_AW, self._address_width - 2)
def set_crc_bytes(self, crc_bytes):
assert 1 <= crc_bytes <= 2
if crc_bytes == 1:
self._crc_bytes = 0
else:
self._crc_bytes = self.CRCO
def set_data_rate(self, rate):
# RF24_1MBPS = 0
# RF24_2MBPS = 1
# RF24_250KBPS = 2
assert RF24_DATA_RATE.RATE_1MBPS <= rate <= RF24_DATA_RATE.RATE_250KBPS
# Read current setup value from register.
value = self._nrf_read_reg(self.RF_SETUP, 1)[0]
# Reset RF_DR_LOW and RF_DR_HIGH to 00 which is 1 Mbps (default)
value &= ~(NRF24.RF_DR_LOW | NRF24.RF_DR_HIGH)
# Set the RF_DR_LOW bit if speed is 250 Kbps
if rate == RF24_DATA_RATE.RATE_250KBPS: # WAS NRF24.RF24_250KBPS:
value |= NRF24.RF_DR_LOW
# Set the RF_DR_HIGH bit if speed is 2 Mbps
elif rate == RF24_DATA_RATE.RATE_2MBPS: # WAS:NRF24.RF24_2MBPS:
value |= NRF24.RF_DR_HIGH
# Write value back to setup register.
self._nrf_write_reg(self.RF_SETUP, value)
def set_pa_level(self, level):
#uint8_t setup = read_register(RF_SETUP) & 0xF8
#if (level > 3) { // If invalid level, go to max PA
# level = (RF24_PA_MAX << 1) + 1; // +1 to support the SI24R1 chip extra bit
#} else {
# level = (level << 1) + 1; // Else set level as requested
#}
#write_register(RF_SETUP, setup |= level); // Write it to the chip
if not isinstance(level, int):
raise ValueError("PA level must be int.")
if level < RF24_PA.MIN or level > RF24_PA.MAX:
level = (RF24_PA.MAX << 1) + 1
else:
level = (level << 1) + 1
value = self._nrf_read_reg(NRF24.RF_SETUP, 1)[0]
value &= 0xf8
value |= level
self._nrf_write_reg(NRF24.RF_SETUP, value)
def get_pa_level(self):
#return (read_register(RF_SETUP) & (_BV(RF_PWR_LOW) | _BV(RF_PWR_HIGH))) >> 1;
value = self._nrf_read_reg(NRF24.RF_SETUP, 1)[0]
value &= (NRF24.RF_PWR_LOW | NRF24.RF_PWR_HIGH)
value >>= 1
return RF24_PA(value)
def get_spi_handle(self):
return self._spi_handle
def show_registers(self):
print("Registers:")
print("----------")
print(self.format_config())
print(self.format_en_aa())
print(self.format_en_rxaddr())
print(self.format_setup_aw())
print(self.format_setup_retr())
print(self.format_rf_ch())
print(self.format_rf_setup())
print(self.format_status())
print(self.format_observe_tx())
print(self.format_rpd())
print(self.format_rx_addr_px())
print(self.format_tx_addr())
print(self.format_rx_pw_px())
print(self.format_fifo_status())
print(self.format_dynpd())
print(self.format_feature())
print("----------")
def _make_fixed_width(self, msg, width, pad):
if isinstance(msg, str):
msg = map(ord, msg)
msg = list(msg)
if len(msg) >= width:
return msg[:width]
else:
msg.extend([pad] * (width - len(msg)))
return msg
def send(self, data):
# We expect a list of byte values to be sent. However, popular types
# such as string, integer, bytes, and bytearray are handled automatically using
# this conversion code.
if not isinstance(data, list):
if isinstance(data, str):
data = list(map(ord, data))
elif isinstance(data, int):
data = list(data.to_bytes(-(-data.bit_length() // 8), 'little'))
else:
data = list(data)
status = self.get_status()
if status & (self.TX_FULL | self.MAX_RT):
self.flush_tx()
if self._payload_size >= RF24_PAYLOAD.MIN: # fixed payload
data = self._make_fixed_width(data, self._payload_size, self._padding)
self._nrf_command([self.W_TX_PAYLOAD] + data)
self.power_up_tx()
# Added 2020-05-01, <NAME>
def reset_plos(self):
v = self._nrf_read_reg(NRF24.RF_CH, 1)[0]
self._nrf_write_reg(NRF24.RF_CH, v)
# Changed 2020-05-01, <NAME>
# WAS:
# def ack_payload(self, data):
# self._nrf_command([self.W_ACK_PAYLOAD] + data)
def ack_payload(self, pipe, data):
self._nrf_command([self.W_ACK_PAYLOAD | (pipe & 0x07)] + data)
def open_writing_pipe(self, address):
addr = self._make_fixed_width(address, self._address_width, self._padding)
self.unset_ce()
self._nrf_write_reg(self.TX_ADDR, addr)
self._nrf_write_reg(self.RX_ADDR_P0, addr)
self.set_ce()
def open_reading_pipe(self, pipe, address):
# pipe: RX_ADDR_P0, RX_ADDR_P1, RX_ADDR_P2, RX_ADDR_P3, RX_ADDR_P4, RX_ADDR_P5
# address: max 5 bytes for P0 and P1, max 1 byte for P2, P3, P4, and P5
# Validate pipe input.
if not (isinstance(pipe, int) or isinstance(pipe, RF24_RX_ADDR)):
raise ValueError(f"pipe must be int or RF24_RX_ADDR enum.")
# If a pipe is given as 0..5 add the 0x0a value corresponding to RX_ADDR_P0
if (pipe >= 0 and pipe <= 5):
pipe = pipe + NRF24.RX_ADDR_P0
if (pipe < NRF24.RX_ADDR_P0 or pipe > NRF24.RX_ADDR_P5):
raise ValueError(f"pipe out of range ({NRF24.RX_ADDR_P0:02x} <= pipe <= and {NRF24.RX_ADDR_P5:02x}).")
# Adjust address.
addr = self._make_fixed_width(address, self._address_width, self._padding)
if pipe > 0x0b:
addr = addr[:1]
# Update address on NRF24L01 module.
en_rxaddr = self._nrf_read_reg(NRF24.EN_RXADDR, 1)[0]
enable = 1 << (pipe - NRF24.RX_ADDR_P0)
self.unset_ce()
self._nrf_write_reg(pipe, addr)
self._nrf_write_reg(NRF24.EN_RXADDR, en_rxaddr | enable)
self.set_ce()
def set_local_address(self, address):
addr = self._make_fixed_width(address, self._address_width, self._padding)
self.unset_ce()
self._nrf_write_reg(NRF24.RX_ADDR_P1, addr)
self.set_ce()
def set_remote_address(self, raddr):
addr = self._make_fixed_width(raddr, self._address_width, self._padding)
self.unset_ce()
self._nrf_write_reg(self.TX_ADDR, addr)
self._nrf_write_reg(self.RX_ADDR_P0, addr) # Required for automatic acknowledgements.
self.set_ce()
def data_ready_pipe(self):
status = self.get_status()
pipe = (status >> 1) & 0x07
if status & self.RX_DR:
return True, pipe
fifo_status = self._nrf_read_reg(self.FIFO_STATUS, 1)[0]
if fifo_status & self.FRX_EMPTY:
return False, pipe
else:
return True, pipe
def data_pipe(self):
status = self.get_status()
pipe = (status >> 1) & 0x07
return pipe
def data_ready(self):
status = self.get_status()
if status & self.RX_DR:
return True
fifo_status = self._nrf_read_reg(self.FIFO_STATUS, 1)[0]
if fifo_status & self.FRX_EMPTY:
return False
else:
return True
def is_sending(self):
if self._power_tx > 0:
status = self.get_status()
if status & (self.TX_DS | self.MAX_RT):
self.power_up_rx()
return False
return True
return False
def get_payload(self):
if self._payload_size < RF24_PAYLOAD.MIN:
# dynamic payload
bytes_count = self._nrf_command([self.R_RX_PL_WID, 0])[1]
else:
# fixed payload
bytes_count = self._payload_size
d = self._nrf_read_reg(self.R_RX_PAYLOAD, bytes_count)
self.unset_ce() # added
self._nrf_write_reg(self.STATUS, self.RX_DR)
self.set_ce() # added
return d
def get_status(self):
return self._nrf_command(self.NOP)[0]
def power_up_tx(self):
self.unset_ce()
self._power_tx = 1
config = self.EN_CRC | self._crc_bytes | self.PWR_UP
self._nrf_write_reg(self.CONFIG, config)
self._nrf_write_reg(self.STATUS, self.RX_DR | self.TX_DS | self.MAX_RT)
self.set_ce()
def power_up_rx(self):
self.unset_ce()
self._power_tx = 0
config = self.EN_CRC | self._crc_bytes | self.PWR_UP | self.PRIM_RX
self._nrf_write_reg(self.CONFIG, config)
self._nrf_write_reg(self.STATUS, self.RX_DR | self.TX_DS | self.MAX_RT)
self.set_ce()
def power_down(self):
self.unset_ce()
self._nrf_write_reg(self.CONFIG, self.EN_CRC | self._crc_bytes)
def set_ce(self):
self._pi.write(self._ce_pin, 1)
def unset_ce(self):
self._pi.write(self._ce_pin, 0)
def flush_rx(self):
self._nrf_command(self.FLUSH_RX)
def flush_tx(self):
self._nrf_command(self.FLUSH_TX)
def _nrf_xfer(self, data):
b, d = self._pi.spi_xfer(self._spi_handle, data)
return d
def _nrf_command(self, arg):
if type(arg) is not list:
arg = [arg]
return self._nrf_xfer(arg)
def _nrf_read_reg(self, reg, count):
return self._nrf_xfer([reg] + [0] * count)[1:]
def _nrf_write_reg(self, reg, arg):
"""
Write arg (which may be one or more bytes) to reg.
This function is only permitted in a powerdown or
standby mode.
"""
if type(arg) is not list:
arg = [arg]
self._nrf_xfer([self.W_REGISTER | reg] + arg)
def _configure_payload(self):
if self._payload_size >= RF24_PAYLOAD.MIN: # fixed payload
self._nrf_write_reg(NRF24.RX_PW_P0, self._payload_size)
self._nrf_write_reg(NRF24.RX_PW_P1, self._payload_size)
self._nrf_write_reg(NRF24.RX_PW_P2, self._payload_size)
self._nrf_write_reg(NRF24.RX_PW_P3, self._payload_size)
self._nrf_write_reg(NRF24.RX_PW_P4, self._payload_size)
self._nrf_write_reg(NRF24.RX_PW_P5, self._payload_size)
self._nrf_write_reg(NRF24.DYNPD, 0)
self._nrf_write_reg(NRF24.FEATURE, 0)
else: # dynamic payload
self._nrf_write_reg(NRF24.RX_PW_P0, 0)
self._nrf_write_reg(NRF24.RX_PW_P1, 0)
self._nrf_write_reg(NRF24.RX_PW_P2, 0)
self._nrf_write_reg(NRF24.RX_PW_P3, 0)
self._nrf_write_reg(NRF24.RX_PW_P4, 0)
self._nrf_write_reg(NRF24.RX_PW_P5, 0)
self._nrf_write_reg(NRF24.DYNPD, NRF24.DPL_P0 | NRF24.DPL_P1 | NRF24.DPL_P2 | NRF24.DPL_P3 | NRF24.DPL_P4 | NRF24.DPL_P5 | NRF24.DPL_P6 | NRF24.DPL_P7)
if self._payload_size == RF24_PAYLOAD.ACK:
self._nrf_write_reg(NRF24.FEATURE, NRF24.EN_DPL | NRF24.EN_ACK_PAY)
else:
self._nrf_write_reg(NRF24.FEATURE, NRF24.EN_DPL)
# Constants related to NRF24 configuration/operation.
_AUX_SPI = (1 << 8)
R_REGISTER = 0x00 # reg in bits 0-4, read 1-5 bytes
W_REGISTER = 0x20 # reg in bits 0-4, write 1-5 bytes
R_RX_PL_WID = 0x60
R_RX_PAYLOAD = 0x61 # read 1-32 bytes
W_TX_PAYLOAD = 0xA0 # write 1-32 bytes
W_ACK_PAYLOAD = 0xA8 # pipe in bits 0-2, write 1-32 bytes
W_TX_PAYLOAD_NO_ACK = 0xB0 # no ACK, write 1-32 bytes
FLUSH_TX = 0xE1
FLUSH_RX = 0xE2
REUSE_TX_PL = 0xE3
NOP = 0xFF # no operation
CONFIG = 0x00
EN_AA = 0x01
EN_RXADDR = 0x02
SETUP_AW = 0x03
SETUP_RETR = 0x04
RF_CH = 0x05
RF_SETUP = 0x06
STATUS = 0x07
OBSERVE_TX = 0x08
RPD = 0x09
RX_ADDR_P0 = 0x0A
RX_ADDR_P1 = 0x0B
RX_ADDR_P2 = 0x0C
RX_ADDR_P3 = 0x0D
RX_ADDR_P4 = 0x0E
RX_ADDR_P5 = 0x0F
TX_ADDR = 0x10
RX_PW_P0 = 0x11
RX_PW_P1 = 0x12
RX_PW_P2 = 0x13
RX_PW_P3 = 0x14
RX_PW_P4 = 0x15
RX_PW_P5 = 0x16
FIFO_STATUS = 0x17
DYNPD = 0x1C
FEATURE = 0x1D
# CONFIG
MASK_RX_DR = 1 << 6
MASK_TX_DS = 1 << 5
MASK_MAX_RT = 1 << 4
EN_CRC = 1 << 3 # 8 (default)
CRCO = 1 << 2 # 4
PWR_UP = 1 << 1 # 2
PRIM_RX = 1 << 0 # 1
def format_config(self):
v = self._nrf_read_reg(NRF24.CONFIG, 1)[0]
s = f"CONFIG: (0x{v:02x}) => "
if v & NRF24.MASK_RX_DR:
s += "no RX_DR IRQ, "
else:
s += "RX_DR IRQ, "
if v & NRF24.MASK_TX_DS:
s += "no TX_DS IRQ, "
else:
s += "TX_DS IRQ, "
if v & NRF24.MASK_MAX_RT:
s += "no MAX_RT IRQ, "
else:
s += "MAX_RT IRQ, "
if v & NRF24.EN_CRC:
s += "CRC on, "
else:
s += "CRC off, "
if v & NRF24.CRCO:
s += "CRC 2 byte, "
else:
s += "CRC 1 byte, "
if v & NRF24.PWR_UP:
s += "Power up, "
else:
s += "Power down, "
if v & NRF24.PRIM_RX:
s += "RX"
else:
s += "TX"
return s
# EN_AA
ENAA_P5 = 1 << 5 # default
ENAA_P4 = 1 << 4 # default
ENAA_P3 = 1 << 3 # default
ENAA_P2 = 1 << 2 # default
ENAA_P1 = 1 << 1 # default
ENAA_P0 = 1 << 0 # default
def format_en_aa(self):
v = self._nrf_read_reg(NRF24.EN_AA, 1)[0]
s = f"EN_AA: (0x{v:02x}) => "
for i in range(6):
if v & (1 << i):
s += f"P{i}:ACK "
else:
s += f"P{i}:no ACK "
return s
# EN_RXADDR
ERX_P5 = 1 << 5
ERX_P4 = 1 << 4
ERX_P3 = 1 << 3
ERX_P2 = 1 << 2
ERX_P1 = 1 << 1 # default
ERX_P0 = 1 << 0 # default
def format_en_rxaddr(self):
v = self._nrf_read_reg(NRF24.EN_RXADDR, 1)[0]
s = f"EN_RXADDR: (0x{v:02x}) => "
for i in range(6):
if v & (1 << i):
s += f"P{i}:on "
else:
s += f"P{i}:off "
return s
# SETUP_AW (Address width)
AW_3 = 1
AW_4 = 2
AW_5 = 3 # default
def format_setup_aw(self):
v = self._nrf_read_reg(NRF24.SETUP_AW, 1)[0]
s = f"SETUP_AW: (0x{v:02x}) => address width bytes "
if v == NRF24.AW_3:
s += "3"
elif v == NRF24.AW_4:
s += "4"
elif v == NRF24.AW_5:
s += "5"
else:
s += "invalid"
return s
# SETUP_RETR (Retry delay and retries)
# ARD 7-4
# ARC 3-0
def format_setup_retr(self):
v = self._nrf_read_reg(NRF24.SETUP_RETR, 1)[0]
ard = (((v >> 4) & 15) * 250) + 250
arc = v & 15
s = f"SETUP_RETR: (0x{v:02x}) => retry delay {ard} us, retries {arc}"
return s
# RF_CH (Channel)
# RF_CH 6-0
def format_rf_ch(self):
v = self._nrf_read_reg(NRF24.RF_CH, 1)[0]
s = f"RF_CH: (0x{v:02x}) => channel={v & 127}"
return s
# RF_SETUP
CONT_WAVE = 1 << 7
RF_DR_LOW = 1 << 5
PLL_LOCK = 1 << 4
RF_DR_HIGH = 1 << 3
RF_PWR_LOW = 1 << 1
RF_PWR_HIGH = 1 << 2
# RF_PWR 2-1
def format_rf_setup(self):
v = self._nrf_read_reg(NRF24.RF_SETUP, 1)[0]
s = f"RF_SETUP: (0x{v:02x}) => "
if v & NRF24.CONT_WAVE:
s += "continuos carrier on, "
else:
s += "no continuous carrier, "
if v & NRF24.PLL_LOCK:
s += "force PLL lock on, "
else:
s += "no force PLL lock, "
dr = 0
if v & NRF24.RF_DR_LOW:
dr += 2
if v & NRF24.RF_DR_HIGH:
dr += 1
if dr == 0:
s += "1 Mbps, "
elif dr == 1:
s += "2 Mbps, "
elif dr == 2:
s += "250 kbps, "
else:
s += "illegal speed, "
pwr = (v >> 1) & 3
if pwr == 0:
s += "-18 dBm"
elif pwr == 1:
s += "-12 dBm"
elif pwr == 2:
s += "-6 dBm"
else:
s += "0 dBm"
return s
# STATUS
RX_DR = 1 << 6
TX_DS = 1 << 5
MAX_RT = 1 << 4
# RX_P_NO 3-1
RX_P_NO = 1
TX_FULL = 1 << 0
def format_status(self):
v = self._nrf_read_reg(NRF24.STATUS, 1)[0]
s = f"STATUS: (0x{v:02x}) => "
if v & NRF24.RX_DR:
s += "RX data, "
else:
s += "no RX data, "
if v & NRF24.TX_DS:
s += "TX ok, "
else:
s += "no TX, "
if v & NRF24.MAX_RT:
s += "TX retries bad, "
else:
s += "TX retries ok, "
p = (v >> 1) & 7
if p < 6:
s += f"pipe {p} data, "
elif p == 6:
s += "PIPE 6 ERROR, "
else:
s += "no pipe data, "
if v & NRF24.TX_FULL:
s += "TX FIFO full"
else:
s += "TX FIFO not full"
return s
# OBSERVE_TX
# PLOS_CNT 7-4
# ARC_CNT 3-0
def format_observe_tx(self):
v = self._nrf_read_reg(NRF24.OBSERVE_TX, 1)[0]
plos = (v >> 4) & 15
arc = v & 15
s = f"OBSERVE_TX: (0x{v:02x}) => lost packets {plos}, retries {arc}"
return s
# RPD
# RPD 1 << 0
def format_rpd(self):
v = self._nrf_read_reg(NRF24.RPD, 1)[0]
s = f"RPD: (0x{v:02x}) => received power detector {v & 1}"
return s
# RX_ADDR_P0 - RX_ADDR_P5
@staticmethod
def _byte2hex(s):
hex_value = ''.join('{:02x}'.format(c) for c in reversed(s))
return hex_value
def format_rx_addr_px(self):
p0 = self._nrf_read_reg(NRF24.RX_ADDR_P0, 5)
p1 = self._nrf_read_reg(NRF24.RX_ADDR_P1, 5)
p2 = self._nrf_read_reg(NRF24.RX_ADDR_P2, 1)[0]
p3 = self._nrf_read_reg(NRF24.RX_ADDR_P3, 1)[0]
p4 = self._nrf_read_reg(NRF24.RX_ADDR_P4, 1)[0]
p5 = self._nrf_read_reg(NRF24.RX_ADDR_P5, 1)[0]
s = "RX ADDR_PX: "
s += f"P0=0x{self._byte2hex(p0)} "
s += f"P1=0x{self._byte2hex(p1)} "
s += f"P2=0x{p2:02x} "
s += f"P3=0x{p3:02x} "
s += f"P4=0x{p4:02x} "
s += f"P5=0x{p5:02x}"
return s
# TX_ADDR
def format_tx_addr(self):
p0 = self._nrf_read_reg(NRF24.TX_ADDR, 5)
s = f"TX_ADDR: 0x{self._byte2hex(p0)} "
return s
# RX_PW_P0 - RX_PW_P5
def format_rx_pw_px(self):
p0 = self._nrf_read_reg(NRF24.RX_PW_P0, 1)[0]
p1 = self._nrf_read_reg(NRF24.RX_PW_P1, 1)[0]
p2 = self._nrf_read_reg(NRF24.RX_PW_P2, 1)[0]
p3 = self._nrf_read_reg(NRF24.RX_PW_P3, 1)[0]
p4 = self._nrf_read_reg(NRF24.RX_PW_P4, 1)[0]
p5 = self._nrf_read_reg(NRF24.RX_PW_P5, 1)[0]
s = "RX_PW_PX: "
s += f"P0={p0:02x} P1={p1:02x} P2={p2:02x} P3={p3:02x} P4={p4:02x} P5={p5:02x} "
return s
# FIFO_STATUS
FTX_REUSE = 1 << 6
FTX_FULL = 1 << 5
FTX_EMPTY = 1 << 4
FRX_FULL = 1 << 1
FRX_EMPTY = 1 << 0
def format_fifo_status(self):
v = self._nrf_read_reg(NRF24.FIFO_STATUS, 1)[0]
s = f"FIFO_STATUS: (0x{v:02x}) => "
if v & NRF24.FTX_REUSE:
s += "TX reuse set, "
else:
s += "TX reuse not set, "
if v & NRF24.FTX_FULL:
s += "TX FIFO full, "
elif v & NRF24.FTX_EMPTY:
s += "TX FIFO empty, "
else:
s += "TX FIFO has data, "
if v & NRF24.FRX_FULL:
s += "RX FIFO full, "
elif v & NRF24.FRX_EMPTY:
s += "RX FIFO empty"
else:
s += "RX FIFO has data"
return s
# DYNPD
DPL_P7 = 1 << 7
DPL_P6 = 1 << 6
DPL_P5 = 1 << 5
DPL_P4 = 1 << 4
DPL_P3 = 1 << 3
DPL_P2 = 1 << 2
DPL_P1 = 1 << 1
DPL_P0 = 1 << 0
def format_dynpd(self):
v = self._nrf_read_reg(NRF24.DYNPD, 1)[0]
s = f"DYNPD: (0x{v:02x}) => "
for i in range(6):
if v & (1 << i):
s += f"P{i}:on "
else:
s += f"P{i}:off "
return s
# FEATURE
EN_DPL = 1 << 2
EN_ACK_PAY = 1 << 1
EN_DYN_ACK = 1 << 0
def format_feature(self):
v = self._nrf_read_reg(NRF24.FEATURE, 1)[0]
s = f"FEATURE: (0x{v:02x}) => "
if v & NRF24.EN_DPL:
s += "Dynamic payload on, "
else:
s += "Dynamic payload off, "
if v & NRF24.EN_ACK_PAY:
s += "ACK payload on, "
else:
s += "ACK payload off, "
if v & NRF24.EN_DYN_ACK:
s += "W_TX_PAYLOAD_NOACK on"
else:
s += "W_TX_PAYLOAD_NOACK off"
return s
``` |
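For orientation, here is a minimal receive-loop sketch built only from the methods visible in the excerpt above (`set_local_address`, `set_remote_address`, `power_up_rx`, `data_ready`, `get_payload`, `power_down`). The constructor call and the address values are assumptions — the class `__init__` is defined earlier in the file and its real signature is not shown here — so treat this as illustrative usage rather than the driver's documented API.
```python
import time
import pigpio

pi = pigpio.pi()

# Hypothetical construction; the actual __init__ arguments are defined earlier in the file.
radio = NRF24(pi, ce_pin=25)

radio.set_local_address("RCVR1")    # padded/truncated to the configured address width
radio.set_remote_address("SNDR1")   # also copied to RX_ADDR_P0 for auto-acknowledgement
radio.power_up_rx()                 # CONFIG = EN_CRC | CRC bytes | PWR_UP | PRIM_RX

try:
    while True:
        while radio.data_ready():   # RX_DR set, or RX FIFO not empty
            payload = radio.get_payload()
            print("received:", payload)
        time.sleep(0.01)
finally:
    radio.power_down()
    pi.stop()
```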
{
"source": "4ydan/refnx",
"score": 2
} |
#### File: analysis/test/test_curvefitter.py
```python
import os.path
import pickle
import numpy as np
import scipy.optimize as sciopt
from scipy.stats import norm
import pytest
from numpy.testing import (
assert_,
assert_almost_equal,
assert_equal,
assert_allclose,
)
from refnx.analysis import (
CurveFitter,
Parameter,
Parameters,
Model,
Objective,
process_chain,
load_chain,
Bounds,
PDF,
autocorrelation_chain,
integrated_time,
)
from refnx.analysis.curvefitter import bounds_list
from refnx.dataset import Data1D
from refnx._lib import emcee, flatten
from NISTModels import NIST_runner, NIST_Models
def line(x, params, *args, **kwds):
p_arr = np.array(params)
return p_arr[0] + x * p_arr[1]
class TestCurveFitter:
def setup_method(self):
# Reproducible results!
np.random.seed(123)
self.m_true = -0.9594
self.b_true = 4.294
self.f_true = 0.534
self.m_ls = -1.1040757010910947
self.b_ls = 5.4405552502319505
# Generate some synthetic data from the model.
N = 50
x = np.sort(10 * np.random.rand(N))
y_err = 0.1 + 0.5 * np.random.rand(N)
y = self.m_true * x + self.b_true
y += np.abs(self.f_true * y) * np.random.randn(N)
y += y_err * np.random.randn(N)
self.data = Data1D(data=(x, y, y_err))
self.p = Parameter(self.b_ls, "b", vary=True, bounds=(-100, 100))
self.p |= Parameter(self.m_ls, "m", vary=True, bounds=(-100, 100))
self.model = Model(self.p, fitfunc=line)
self.objective = Objective(self.model, self.data)
assert_(len(self.objective.varying_parameters()) == 2)
mod = np.array(
[
4.78166609,
4.42364699,
4.16404064,
3.50343504,
3.4257084,
2.93594347,
2.92035638,
2.67533842,
2.28136038,
2.19772983,
1.99295496,
1.93748334,
1.87484436,
1.65161016,
1.44613461,
1.11128101,
1.04584535,
0.86055984,
0.76913963,
0.73906649,
0.73331407,
0.68350418,
0.65216599,
0.59838566,
0.13070299,
0.10749131,
-0.01010195,
-0.10010155,
-0.29495372,
-0.42817431,
-0.43122391,
-0.64637715,
-1.30560686,
-1.32626428,
-1.44835768,
-1.52589881,
-1.56371158,
-2.12048349,
-2.24899179,
-2.50292682,
-2.53576659,
-2.55797996,
-2.60870542,
-2.7074727,
-3.93781479,
-4.12415366,
-4.42313742,
-4.98368609,
-5.38782395,
-5.44077086,
]
)
self.mod = mod
self.mcfitter = CurveFitter(self.objective)
def test_bounds_list(self):
bnds = bounds_list(self.p)
assert_allclose(bnds, [(-100, 100), (-100, 100)])
# try making a Parameter bound a normal distribution, then get an
# approximation to box bounds
self.p[0].bounds = PDF(norm(0, 1))
assert_allclose(
bounds_list(self.p), [norm(0, 1).ppf([0.005, 0.995]), (-100, 100)]
)
def test_constraints(self):
# constraints should work during fitting
self.p[0].value = 5.4
self.p[1].constraint = -0.203 * self.p[0]
assert_equal(self.p[1].value, self.p[0].value * -0.203)
res = self.mcfitter.fit()
assert_(res.success)
assert_equal(len(self.objective.varying_parameters()), 1)
# lnsigma is parameters[0]
assert_(self.p[0] is self.objective.parameters.flattened()[0])
assert_(self.p[1] is self.objective.parameters.flattened()[1])
assert_almost_equal(self.p[0].value, res.x[0])
assert_almost_equal(self.p[1].value, self.p[0].value * -0.203)
# check that constraints work during sampling
# the CurveFitter has to be set up again if you change how the
# parameters are being fitted.
mcfitter = CurveFitter(self.objective)
assert_(mcfitter.nvary == 1)
mcfitter.sample(5)
assert_equal(self.p[1].value, self.p[0].value * -0.203)
# the constrained parameters should have a chain
assert_(self.p[0].chain is not None)
assert_(self.p[1].chain is not None)
assert_allclose(self.p[1].chain, self.p[0].chain * -0.203)
def test_mcmc(self):
self.mcfitter.sample(steps=50, nthin=1, verbose=False)
assert_equal(self.mcfitter.nvary, 2)
# smoke test for corner plot
self.mcfitter.objective.corner()
# we're not doing Parallel Tempering here.
assert_(self.mcfitter._ntemps == -1)
assert_(isinstance(self.mcfitter.sampler, emcee.EnsembleSampler))
# should be able to multithread
mcfitter = CurveFitter(self.objective, nwalkers=50)
res = mcfitter.sample(steps=33, nthin=2, verbose=False, pool=2)
# check that the autocorrelation function at least runs
acfs = mcfitter.acf(nburn=10)
assert_equal(acfs.shape[-1], mcfitter.nvary)
# check the standalone autocorrelation calculator
acfs2 = autocorrelation_chain(mcfitter.chain, nburn=10)
assert_equal(acfs, acfs2)
# check integrated_time
integrated_time(acfs2, tol=5)
# check chain shape
assert_equal(mcfitter.chain.shape, (33, 50, 2))
# assert_equal(mcfitter._lastpos, mcfitter.chain[:, -1, :])
assert_equal(res[0].chain.shape, (33, 50))
# if the number of parameters changes there should be an Exception
# raised
from pytest import raises
with raises(RuntimeError):
self.p[0].vary = False
self.mcfitter.sample(1)
# can fix by making the sampler again
self.mcfitter.make_sampler()
self.mcfitter.sample(1)
def test_random_seed(self):
# check that MCMC sampling is reproducible
self.mcfitter.sample(steps=2, random_state=1)
# get a starting pos
starting_pos = self.mcfitter._state.coords
# is sampling reproducible
self.mcfitter.reset()
self.mcfitter.initialise(pos=starting_pos)
self.mcfitter.sample(3, random_state=1, pool=1)
chain1 = np.copy(self.mcfitter.chain)
self.mcfitter.reset()
self.mcfitter.initialise(pos=starting_pos)
self.mcfitter.sample(3, random_state=1, pool=1)
chain2 = np.copy(self.mcfitter.chain)
assert_equal(chain1, chain2)
def test_mcmc_pt(self):
# smoke test for parallel tempering
x = np.array(self.objective.parameters)
mcfitter = CurveFitter(self.objective, ntemps=10, nwalkers=50)
assert_equal(mcfitter.sampler.ntemps, 10)
assert len(list(flatten(self.objective.parameters))) == 2
# check that the parallel sampling works
# and that chain shape is correct
res = mcfitter.sample(steps=5, nthin=2, verbose=False, pool=-1)
assert_equal(mcfitter.chain.shape, (5, 10, 50, 2))
assert_equal(res[0].chain.shape, (5, 50))
assert_equal(mcfitter.chain[:, 0, :, 0], res[0].chain)
assert_equal(mcfitter.chain[:, 0, :, 1], res[1].chain)
chain = np.copy(mcfitter.chain)
assert len(list(flatten(self.objective.parameters))) == 2
# the sampler should store the probability
assert_equal(mcfitter.logpost.shape, (5, 10, 50))
assert_allclose(mcfitter.logpost, mcfitter.sampler._ptchain.logP)
logprobs = mcfitter.logpost
highest_prob_loc = np.argmax(logprobs[:, 0])
idx = np.unravel_index(highest_prob_loc, logprobs[:, 0].shape)
idx = list(idx)
idx.insert(1, 0)
idx = tuple(idx)
assert_equal(idx, mcfitter.index_max_prob)
pvals = mcfitter.chain[idx]
assert_allclose(logprobs[idx], self.objective.logpost(pvals))
# try resetting the chain
mcfitter.reset()
# test for reproducible operation
self.objective.setp(x)
mcfitter = CurveFitter(self.objective, ntemps=10, nwalkers=50)
mcfitter.initialise("jitter", random_state=1)
mcfitter.sample(steps=5, nthin=2, verbose=False, random_state=2)
chain = np.copy(mcfitter.chain)
self.objective.setp(x)
mcfitter = CurveFitter(self.objective, ntemps=10, nwalkers=50)
mcfitter.initialise("jitter", random_state=1)
mcfitter.sample(steps=5, nthin=2, verbose=False, random_state=2)
chain2 = np.copy(mcfitter.chain)
assert_allclose(chain2, chain)
def test_mcmc_init(self):
# smoke test for sampler initialisation
# TODO check that the initialisation worked.
# reproducible initialisation with random_state dependents
self.mcfitter.initialise("prior", random_state=1)
starting_pos = np.copy(self.mcfitter._state.coords)
self.mcfitter.initialise("prior", random_state=1)
starting_pos2 = self.mcfitter._state.coords
assert_equal(starting_pos, starting_pos2)
self.mcfitter.initialise("jitter", random_state=1)
starting_pos = np.copy(self.mcfitter._state.coords)
self.mcfitter.initialise("jitter", random_state=1)
starting_pos2 = self.mcfitter._state.coords
assert_equal(starting_pos, starting_pos2)
mcfitter = CurveFitter(self.objective, nwalkers=100)
mcfitter.initialise("covar")
assert_equal(mcfitter._state.coords.shape, (100, 2))
mcfitter.initialise("prior")
assert_equal(mcfitter._state.coords.shape, (100, 2))
mcfitter.initialise("jitter")
assert_equal(mcfitter._state.coords.shape, (100, 2))
# initialise with last position
mcfitter.sample(steps=1)
chain = mcfitter.chain
mcfitter.initialise(pos=chain[-1])
assert_equal(mcfitter._state.coords.shape, (100, 2))
# initialise with chain
mcfitter.sample(steps=2)
chain = mcfitter.chain
mcfitter.initialise(pos=chain)
assert_equal(mcfitter._state.coords, chain[-1])
# initialise with chain if it's never been run before
mcfitter = CurveFitter(self.objective, nwalkers=100)
mcfitter.initialise(chain)
# initialise for Parallel tempering
mcfitter = CurveFitter(self.objective, ntemps=20, nwalkers=100)
mcfitter.initialise("covar")
assert_equal(mcfitter._state.coords.shape, (20, 100, 2))
mcfitter.initialise("prior")
assert_equal(mcfitter._state.coords.shape, (20, 100, 2))
mcfitter.initialise("jitter")
assert_equal(mcfitter._state.coords.shape, (20, 100, 2))
# initialise with last position
mcfitter.sample(steps=1)
chain = mcfitter.chain
mcfitter.initialise(pos=chain[-1])
assert_equal(mcfitter._state.coords.shape, (20, 100, 2))
# initialise with chain
mcfitter.sample(steps=2)
chain = mcfitter.chain
mcfitter.initialise(pos=np.copy(chain))
assert_equal(mcfitter._state.coords, chain[-1])
# initialise with chain if it's never been run before
mcfitter = CurveFitter(self.objective, nwalkers=100, ntemps=20)
mcfitter.initialise(chain)
def test_fit_smoke(self):
# smoke tests to check that fit runs
def callback(xk):
return
def callback2(xk, **kws):
return
# L-BFGS-B
res0 = self.mcfitter.fit(callback=callback)
assert_almost_equal(res0.x, [self.b_ls, self.m_ls], 6)
res0 = self.mcfitter.fit()
res0 = self.mcfitter.fit(verbose=False)
res0 = self.mcfitter.fit(verbose=False, callback=callback)
# least_squares
res1 = self.mcfitter.fit(method="least_squares")
assert_almost_equal(res1.x, [self.b_ls, self.m_ls], 6)
# least_squares doesn't accept a callback. As well as testing that
# least_squares works, it checks that providing a callback doesn't
# trip the fitter up.
res1 = self.mcfitter.fit(method="least_squares", callback=callback)
assert_almost_equal(res1.x, [self.b_ls, self.m_ls], 6)
# need full bounds for differential_evolution
self.p[0].range(3, 7)
self.p[1].range(-2, 0)
res2 = self.mcfitter.fit(
method="differential_evolution",
seed=1,
popsize=10,
maxiter=100,
callback=callback2,
)
assert_almost_equal(res2.x, [self.b_ls, self.m_ls], 6)
# check that the res object has covar and stderr
assert_("covar" in res0)
assert_("stderr" in res0)
@pytest.mark.parametrize("model", NIST_Models)
def test_NIST(self, model):
# Run all the NIST standard tests with leastsq
NIST_runner(model)
def gauss(x, p0):
p = np.array(p0)
return p[0] + p[1] * np.exp(-(((x - p[2]) / p[3]) ** 2))
class TestFitterGauss:
# Test CurveFitter with a noisy gaussian, weighted and unweighted, to see
# if the parameters and uncertainties come out correct
@pytest.fixture(autouse=True)
def setup_method(self, tmpdir):
self.path = os.path.dirname(os.path.abspath(__file__))
self.tmpdir = tmpdir.strpath
theoretical = np.loadtxt(os.path.join(self.path, "gauss_data.txt"))
xvals, yvals, evals = np.hsplit(theoretical, 3)
xvals = xvals.flatten()
yvals = yvals.flatten()
evals = evals.flatten()
# these best weighted values and uncertainties obtained with Igor
self.best_weighted = [-0.00246095, 19.5299, -8.28446e-2, 1.24692]
self.best_weighted_errors = [
0.0220313708486,
1.12879436221,
0.0447659158681,
0.0412022938883,
]
self.best_weighted_chisqr = 77.6040960351
self.best_unweighted = [
-0.10584111872702096,
19.240347049328989,
0.0092623066070940396,
1.501362314145845,
]
self.best_unweighted_errors = [
0.34246565477,
0.689820935208,
0.0411243173041,
0.0693429375282,
]
self.best_unweighted_chisqr = 497.102084956
self.p0 = np.array([0.1, 20.0, 0.1, 0.1])
self.names = ["bkg", "A", "x0", "width"]
self.bounds = [(-1, 1), (0, 30), (-5.0, 5.0), (0.001, 2)]
self.params = Parameters(name="gauss_params")
for p, name, bound in zip(self.p0, self.names, self.bounds):
param = Parameter(p, name=name)
param.range(*bound)
param.vary = True
self.params.append(param)
self.model = Model(self.params, fitfunc=gauss)
self.data = Data1D((xvals, yvals, evals))
self.objective = Objective(self.model, self.data)
return 0
def test_pickle(self):
# tests if a CurveFitter can be pickled/unpickled.
f = CurveFitter(self.objective)
pkl = pickle.dumps(f)
g = pickle.loads(pkl)
g._check_vars_unchanged()
def test_best_weighted(self):
assert_equal(len(self.objective.varying_parameters()), 4)
self.objective.setp(self.p0)
f = CurveFitter(self.objective, nwalkers=100)
res = f.fit("least_squares", jac="3-point")
output = res.x
assert_almost_equal(output, self.best_weighted, 3)
assert_almost_equal(
self.objective.chisqr(), self.best_weighted_chisqr, 5
)
# compare the residuals
res = (self.data.y - self.model(self.data.x)) / self.data.y_err
assert_equal(self.objective.residuals(), res)
# compare objective.covar to the best_weighted_errors
uncertainties = [param.stderr for param in self.params]
assert_allclose(uncertainties, self.best_weighted_errors, rtol=0.005)
# we're also going to try the checkpointing here.
checkpoint = os.path.join(self.tmpdir, "checkpoint.txt")
# compare samples to best_weighted_errors
np.random.seed(1)
f.sample(steps=201, random_state=1, verbose=False, f=checkpoint)
process_chain(self.objective, f.chain, nburn=50, nthin=10)
uncertainties = [param.stderr for param in self.params]
assert_allclose(uncertainties, self.best_weighted_errors, rtol=0.07)
# test that the checkpoint worked
check_array = np.loadtxt(checkpoint)
check_array = check_array.reshape(201, f._nwalkers, f.nvary)
assert_allclose(check_array, f.chain)
# test loading the checkpoint
chain = load_chain(checkpoint)
assert_allclose(chain, f.chain)
f.initialise("jitter")
f.sample(steps=2, nthin=4, f=checkpoint, verbose=False)
assert_equal(f.chain.shape[0], 2)
# we should be able to produce 2 * 100 steps from the generator
g = self.objective.pgen(ngen=20000000000)
s = [i for i, a in enumerate(g)]
assert_equal(np.max(s), 200 - 1)
g = self.objective.pgen(ngen=200)
pvec = next(g)
assert_equal(pvec.size, len(self.objective.parameters.flattened()))
# check that all the parameters are returned via pgen, not only those
# being varied.
self.params[0].vary = False
f = CurveFitter(self.objective, nwalkers=100)
f.initialise("jitter")
f.sample(steps=2, nthin=4, f=checkpoint, verbose=False)
g = self.objective.pgen(ngen=100)
pvec = next(g)
assert_equal(pvec.size, len(self.objective.parameters.flattened()))
# the following test won't work because of emcee/gh226.
# chain = load_chain(checkpoint)
# assert_(chain.shape == f.chain.shape)
# assert_allclose(chain, f.chain)
# try reproducing best fit with parallel tempering
self.params[0].vary = True
f = CurveFitter(self.objective, nwalkers=100, ntemps=10)
f.fit("differential_evolution", seed=1)
f.sample(steps=201, random_state=1, verbose=False)
process_chain(self.objective, f.chain, nburn=50, nthin=15)
print(self.params[0].chain.shape, self.params[0].chain)
uncertainties = [param.stderr for param in self.params]
assert_allclose(uncertainties, self.best_weighted_errors, rtol=0.07)
def test_best_unweighted(self):
self.objective.weighted = False
f = CurveFitter(self.objective, nwalkers=100)
res = f.fit()
output = res.x
assert_almost_equal(
self.objective.chisqr(), self.best_unweighted_chisqr
)
assert_almost_equal(output, self.best_unweighted, 5)
# compare the residuals
res = self.data.y - self.model(self.data.x)
assert_equal(self.objective.residuals(), res)
# compare objective._covar to the best_unweighted_errors
uncertainties = np.array([param.stderr for param in self.params])
assert_almost_equal(uncertainties, self.best_unweighted_errors, 3)
# the samples won't compare to the covariance matrix...
# f.sample(nsteps=150, nburn=20, nthin=30, random_state=1)
# uncertainties = [param.stderr for param in self.params]
# assert_allclose(uncertainties, self.best_unweighted_errors,
# rtol=0.15)
def test_all_minimisers(self):
"""test minimisers against the Gaussian fit"""
f = CurveFitter(self.objective)
methods = [
"differential_evolution",
"L-BFGS-B",
"least_squares",
"shgo",
"dual_annealing",
]
for method in methods:
self.objective.setp(self.p0)
opts = {}
if method in ["differential_evolution", "dual_annealing"]:
opts = {"seed": 1}
res = f.fit(method=method, **opts)
assert_allclose(res.x, self.best_weighted, rtol=0.005)
# smoke test to check that we can use nlpost
self.objective.setp(self.p0)
logp0 = self.objective.logp()
# check that probabilities are calculated correctly
assert_allclose(
self.objective.logpost(),
self.objective.logp() + self.objective.logl(),
)
assert_allclose(self.objective.nlpost(), -self.objective.logpost())
assert_allclose(
self.objective.nlpost(self.p0), -self.objective.logpost(self.p0)
)
# if the priors are all uniform then the only difference between
# logpost and logl is a constant. A minimiser should converge on the
# same answer. The following tests examine that.
# The test works for dual_annealing, but not for differential
# evolution, not sure why that is.
self.objective.setp(self.p0)
res1 = f.fit(method="dual_annealing", seed=1)
assert_almost_equal(res1.x, self.best_weighted, 3)
nll1 = self.objective.nll()
nlpost1 = self.objective.nlpost()
self.objective.setp(self.p0)
res2 = f.fit(method="dual_annealing", target="nlpost", seed=1)
assert_almost_equal(res2.x, self.best_weighted, 3)
nll2 = self.objective.nll()
nlpost2 = self.objective.nlpost()
assert_allclose(nlpost1, nlpost2, atol=0.001)
assert_allclose(nll1, nll2, atol=0.001)
# these two priors are calculated for different parameter values
# (before and after the fit) they should be the same because all
# the parameters have uniform priors.
assert_almost_equal(self.objective.logp(), logp0)
def test_pymc3_sample(self):
# test sampling with pymc3
try:
import pymc3 as pm
from refnx.analysis import pymc3_model
except (ModuleNotFoundError, ImportError, AttributeError):
# can't run test if pymc3/theano not installed
return
with pymc3_model(self.objective):
s = pm.NUTS()
pm.sample(
200,
tune=100,
step=s,
discard_tuned_samples=True,
compute_convergence_checks=False,
random_seed=1,
)
# don't check for accuracy because it requires a heap more
# draws.
# means = [np.mean(trace[f"p{i}"]) for i in range(4)]
# assert_allclose(means, self.best_weighted, rtol=0.04)
# errors = [np.std(trace[f"p{i}"]) for i in range(4)]
# assert_allclose(errors, self.best_weighted_errors, atol=0.02)
"""
The Gaussian example sampling can also be performed with pymc3.
The above results from emcee have been verified against pymc3 - the
unweighted sampling statistics are the same.
from pymc3 import (Model, Normal, HalfNormal, Flat, Uniform,
find_MAP, NUTS, sample, summary, traceplot)
basic_model = Model()
with basic_model:
# Priors for unknown model parameters
bkg = Uniform('bkg', -1, 5)
A0 = Uniform('A0', 0, 50)
x0 = Uniform('x0', min(x), max(x))
width = Uniform('width', 0.5, 10)
# Expected value of outcome
mu = bkg + A0 * np.exp(-((x - x0) / width) ** 2)
# Likelihood (sampling distribution) of observations
# y_obs = Normal('y_obs', mu=mu, sd=e, observed=y)
y_obs = Normal('y_obs', mu=mu, observed=y)
with basic_model:
# draw 500 posterior samples
trace = sample(500)
summary(trace)
"""
```
#### File: refnx/dataset/__init__.py
```python
from refnx.dataset.data1d import Data1D
from refnx.dataset.reflectdataset import ReflectDataset, OrsoDataset
from refnx._lib._testutils import PytestTester
from refnx._lib import possibly_open_file as _possibly_open_file
test = PytestTester(__name__)
del PytestTester
def load_data(f):
"""
Loads a dataset
Parameters
----------
f: {file-like, str}
f can be a string or file-like object referring to a File to
load the dataset from.
Returns
-------
data: Data1D-like
data object
"""
try:
data = OrsoDataset(f)
return data
except Exception:
# not an ORSO file
pass
try:
d = ReflectDataset(f)
return d
except Exception:
pass
d = Data1D(f)
return d
__all__ = [s for s in dir() if not s.startswith("_")]
```
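A short usage sketch for `load_data`; the filename is a placeholder, and the final `len()` call assumes the returned dataset implements `__len__` (the ORSO test below relies on exactly that).
```python
from refnx.dataset import load_data

# load_data tries OrsoDataset first, then ReflectDataset, and finally falls back to Data1D.
data = load_data("my_reflectivity.dat")   # placeholder path to a reflectivity data file
print(type(data), len(data))
```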
#### File: dataset/test/test_dataset.py
```python
import os.path
import pytest
import glob
from refnx.dataset import ReflectDataset, Data1D, load_data, OrsoDataset
import numpy as np
from numpy.testing import assert_equal
class TestReflectDataset:
@pytest.fixture(autouse=True)
def setup_method(self, tmpdir):
self.pth = os.path.dirname(os.path.abspath(__file__))
self.cwd = os.getcwd()
self.tmpdir = tmpdir.strpath
os.chdir(self.tmpdir)
def teardown_method(self):
os.chdir(self.cwd)
def test_ort_load(self):
d = load_data(os.path.join(self.pth, "ORSO_data.ort"))
assert len(d) == 2
assert isinstance(d, OrsoDataset)
d.refresh()
def test_load_data(self):
# test the load_data function by trying to load all the files in the
# test directory
fs = glob.glob("*.*")
fs = [f for f in fs if not f.endswith(".py")]
fs = [f for f in fs if not f.startswith("coef_")]
for f in fs:
load_data(f)
```
#### File: 4ydan/refnx/setup.py
```python
from setuptools import setup, Extension, find_packages
from setuptools.command.test import test as TestCommand
import os
import subprocess
import platform
import sys
import warnings
import glob
import tempfile
import textwrap
try:
from Cython.Build import cythonize
except ImportError:
USE_CYTHON = False
warnings.warn("Cython was not found. Slow reflectivity calculations will be used.")
else:
USE_CYTHON = True
###############################################################################
"""
Is openMP usable?
"""
CCODE = textwrap.dedent(
"""\
#include <omp.h>
#include <stdio.h>
int main(void) {
#pragma omp parallel
printf("nthreads=%d\\n", omp_get_num_threads());
return 0;
}
"""
)
def get_openmp_flag(compiler):
if hasattr(compiler, "compiler"):
compiler = compiler.compiler[0]
else:
compiler = compiler.__class__.__name__
if sys.platform == "win32" and ("icc" in compiler or "icl" in compiler):
return ["/Qopenmp"]
elif sys.platform == "win32":
return ["/openmp"]
elif sys.platform == "darwin" and ("icc" in compiler or "icl" in compiler):
return ["-openmp"]
elif sys.platform == "darwin":
# default for macOS, assuming Apple-clang
# -fopenmp can't be passed as compile flag when using Apple-clang.
# OpenMP support has to be enabled during preprocessing.
#
# it may be possible that someone builds with a different/updated
# compiler (don't know how to check for that).
#
# set the following environment variables, assumes that llvm openmp
# has been built and installed by the user.
#
# brew install libomp
# export CC=clang
        # export CXX=clang++
# export CXXFLAGS="$CXXFLAGS -Xpreprocessor -fopenmp"
# export CFLAGS="$CFLAGS -I/usr/local/opt/libomp/include"
# export CXXFLAGS="$CXXFLAGS -I/usr/local/opt/libomp/include"
# export LDFLAGS="$LDFLAGS -L/usr/local/opt/libomp/lib -lomp"
        # export DYLD_LIBRARY_PATH=/usr/local/opt/libomp/lib
return []
# Default flag for GCC and clang:
return ["-fopenmp"]
def check_openmp_support():
"""Check whether OpenMP test code can be compiled and run"""
try:
from setuptools._distutils.ccompiler import new_compiler
from setuptools._distutils.sysconfig import customize_compiler
# from numpy.distutils.ccompiler import new_compiler
# from distutils.sysconfig import customize_compiler
from distutils.errors import CompileError, LinkError
except ImportError:
return False
ccompiler = new_compiler()
customize_compiler(ccompiler)
start_dir = os.path.abspath(".")
with tempfile.TemporaryDirectory() as tmp_dir:
try:
os.chdir(tmp_dir)
# Write test program
with open("test_openmp.c", "w") as f:
f.write(CCODE)
os.mkdir("objects")
# Compile, test program
openmp_flags = get_openmp_flag(ccompiler)
ccompiler.compile(
["test_openmp.c"], output_dir="objects", extra_postargs=openmp_flags
)
# Link test program
extra_preargs = os.getenv("LDFLAGS", None)
if extra_preargs is not None:
extra_preargs = extra_preargs.split(" ")
else:
extra_preargs = []
objects = glob.glob(os.path.join("objects", "*" + ccompiler.obj_extension))
ccompiler.link_executable(
objects,
"test_openmp",
extra_preargs=extra_preargs,
extra_postargs=openmp_flags,
)
# Run test program
output = subprocess.check_output("./test_openmp")
output = output.decode(sys.stdout.encoding or "utf-8").splitlines()
# Check test program output
if "nthreads=" in output[0]:
nthreads = int(output[0].strip().split("=")[1])
openmp_supported = len(output) == nthreads
else:
openmp_supported = False
except (CompileError, LinkError, subprocess.CalledProcessError):
openmp_supported = False
finally:
os.chdir(start_dir)
return openmp_supported
# do you want to parallelise things with openmp?
HAS_OPENMP = check_openmp_support()
# HAS_OPENMP = False
###############################################################################
# versioning
MAJOR = 0
MINOR = 1
MICRO = 26
ISRELEASED = False
VERSION = f"{MAJOR}.{MINOR}.{MICRO}"
# are we on windows, darwin, etc?
platform = sys.platform
packages = find_packages()
try:
idx = packages.index("benchmarks")
if idx >= 0:
packages.pop(idx)
idx = packages.index("benchmarks.benchmarks")
if idx >= 0:
packages.pop(idx)
idx = packages.index("motofit")
if idx >= 0:
packages.pop(idx)
except ValueError:
pass
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ["SYSTEMROOT", "PATH"]:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env["LANGUAGE"] = "C"
env["LANG"] = "C"
env["LC_ALL"] = "C"
out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(["git", "rev-parse", "HEAD"])
GIT_REVISION = out.strip().decode("ascii")
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
def get_version_info():
# Adding the git rev number needs to be done inside
# write_version_py(), otherwise the import of refnx.version messes
# up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists(".git"):
GIT_REVISION = git_version()
elif os.path.exists("refnx/version.py"):
# must be a source distribution, use existing version file
# load it as a separate module to not load refnx/__init__.py
import imp
version = imp.load_source("refnx.version", "refnx/version.py")
GIT_REVISION = version.git_revision
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += ".dev0+" + GIT_REVISION[:7]
return FULLVERSION, GIT_REVISION
def write_version_py(filename="refnx/version.py"):
cnt = """
# THIS FILE IS GENERATED FROM REFNX SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
"""
FULLVERSION, GIT_REVISION = get_version_info()
a = open(filename, "w")
try:
a.write(
cnt
% {
"version": VERSION,
"full_version": FULLVERSION,
"git_revision": GIT_REVISION,
"isrelease": str(ISRELEASED),
}
)
finally:
a.close()
class PyTest(TestCommand):
user_options = [("pytest-args=", "a", "Arguments to pass to pytest")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = "refnx"
def run_tests(self):
import shlex
import pytest
print("Running tests with pytest")
errno = pytest.main(shlex.split(self.pytest_args))
sys.exit(errno)
# refnx setup
info = {
"packages": packages,
"include_package_data": True,
"cmdclass": {"test": PyTest},
}
####################################################################
# this is where setup starts
####################################################################
def setup_package():
# Rewrite the version file every time
write_version_py()
info["version"] = get_version_info()[0]
print(info["version"])
if USE_CYTHON:
# Obtain the numpy include directory. This logic works across numpy
# versions.
ext_modules = []
HAS_NUMPY = True
try:
import numpy as np
        except ImportError:
info["setup_requires"] = ["numpy"]
HAS_NUMPY = False
if HAS_NUMPY:
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
_cevent = Extension(
name="refnx.reduce._cevent",
sources=["src/_cevent.pyx"],
include_dirs=[numpy_include],
language="c++",
define_macros=[("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")],
# libraries=
# extra_compile_args = "...".split(),
)
ext_modules.append(_cevent)
_cutil = Extension(
name="refnx._lib._cutil",
sources=["src/_cutil.pyx"],
include_dirs=[numpy_include],
language="c",
# libraries=
# extra_compile_args = "...".split(),
)
ext_modules.append(_cutil)
# creflect extension module
# Compile reflectivity calculator to object with C compiler
# first.
# It's not possible to do this in an Extension object because
# the `-std=c++11` compile arg and C99 C code are incompatible
# (at least on Darwin).
from setuptools._distutils.ccompiler import new_compiler
from setuptools._distutils.sysconfig import customize_compiler
# from numpy.distutils.ccompiler import new_compiler
# from distutils.sysconfig import customize_compiler
ccompiler = new_compiler()
customize_compiler(ccompiler)
ccompiler.verbose = True
extra_preargs = [
"-O2",
]
if sys.platform == "win32":
# use the C++ code on Windows. The C++ code uses the
# std::complex<double> object for its arithmetic.
f = ["src/refcalc.cpp"]
else:
# and C code on other machines. The C code uses C99 complex
# arithmetic which is 10-20% faster.
# the CMPLX macro was only standardised in C11
extra_preargs.extend(
[
"-std=c11",
"-funsafe-math-optimizations",
"-ffinite-math-only",
]
)
f = ["src/refcalc.c"]
refcalc_obj = ccompiler.compile(f, extra_preargs=extra_preargs)
# print(refcalc_obj)
_creflect = Extension(
name="refnx.reflect._creflect",
sources=["src/_creflect.pyx", "src/refcaller.cpp"],
include_dirs=[numpy_include],
language="c++",
extra_compile_args=["-std=c++11"],
define_macros=[("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")],
extra_objects=refcalc_obj,
)
ext_modules.append(_creflect)
# if we have openmp use pure cython version
# openmp should be present on windows, linux
#
# However, it's not present in Apple Clang. Therefore one has to
# jump through hoops to enable it.
# It's probably easier to install OpenMP on macOS via homebrew.
# However, it's fairly simple to build the OpenMP library, and
# installing it into PREFIX=/usr/local
#
# https://gist.github.com/andyfaff/084005bee32aee83d6b59e843278ab3e
#
# Instructions for macOS:
#
# brew install libomp
# export CC=clang
# export CXX=clang++
# export CXXFLAGS="$CXXFLAGS -Xpreprocessor -fopenmp"
# export CFLAGS="$CFLAGS -I/usr/local/opt/libomp/include"
# export CXXFLAGS="$CXXFLAGS -I/usr/local/opt/libomp/include"
# export LDFLAGS="$LDFLAGS -L/usr/local/opt/libomp/lib -lomp"
# export DYLD_LIBRARY_PATH=/usr/local/opt/libomp/lib
if HAS_OPENMP:
# cyreflect extension module
_cyreflect = Extension(
name="refnx.reflect._cyreflect",
sources=["src/_cyreflect.pyx"],
include_dirs=[numpy_include],
language="c++",
extra_compile_args=[],
extra_link_args=[]
# libraries=
# extra_compile_args = "...".split(),
)
openmp_flags = get_openmp_flag(ccompiler)
_cyreflect.extra_compile_args += openmp_flags
_cyreflect.extra_link_args += openmp_flags
ext_modules.append(_cyreflect)
# specify min deployment version for macOS
if platform == "darwin":
for mod in ext_modules:
mod.extra_compile_args.append("-mmacosx-version-min=10.9")
info["ext_modules"] = cythonize(ext_modules)
info["zip_safe"] = False
try:
setup(**info)
except ValueError:
# there probably wasn't a C-compiler (windows). Try removing extension
# compilation
print("")
print("*****WARNING*****")
print(
"You didn't try to build the Reflectivity calculation extension."
" Calculation will be slow, falling back to pure python."
" To compile extension install cython. If installing in windows you"
" should then install from Visual Studio command prompt (this makes"
" C compiler available"
)
print("*****************")
print("")
info.pop("cmdclass")
info.pop("ext_modules")
setup(**info)
if __name__ == "__main__":
setup_package()
``` |
{
"source": "4yeesnice/1stproject",
"score": 4
} |
#### File: 4yeesnice/1stproject/22.py
```python
list = {"username": [],
'password': []
}
def registration():
i = 0
while i != 2:
username = input()
            # the username must be at least 8 characters long
password = input()
            # the password must not consist of letters only (digits are required too)
check_password = input()
if len(username) >= 8 and password.isalpha() is False:
if check_password == password:
return username, password
i = 2
else:
print("Ошибка! Используйте больше чем 8 символов в логине. Придумайте пароль с цифрами и буквами!")
i = 2
try:
i = 0
while i <= 2:
username, password = registration()
list['username'].append(username)
list['password'].append(password)
i += 1
print(list)
def log_in(list):
login = input()
password = input()
i = 0
while i < len(list['username']):
if login in list['username'][i] and password in list['password'][i]:
print("SUCCESFUL")
break
else:
print("UNSUCCESFUL")
i += 1
log_in(list)
except TypeError:
    pass  # registration() returned None after a failed validation; nothing to unpack
``` |
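For comparison, the same validation rules (username of at least 8 characters, a password that is not letters-only, and a matching confirmation) can be expressed without the sentinel counter and the TypeError workaround. This is a sketch with illustrative names, not a drop-in replacement for the code above.
```python
def register_user():
    """Prompt until a valid username/password pair is supplied, then return it."""
    while True:
        username = input("username: ")
        password = input("password: ")
        confirm = input("confirm password: ")
        if len(username) < 8:
            print("Error: the username must be at least 8 characters long.")
        elif password.isalpha():
            print("Error: the password must contain digits as well as letters.")
        elif password != confirm:
            print("Error: the passwords do not match.")
        else:
            return username, password
```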
{
"source": "4yeesnice/ClaSs-Objects",
"score": 3
} |
#### File: 4yeesnice/ClaSs-Objects/flora_fauna.py
```python
import tsk4
class Flora:
def __init__(self, name, lifespan, habitat, plant_type):
self.name = name
self.lifespan = lifespan
self.habitat = habitat
self.plant_type = plant_type
self.plant_size = 0
def ad_flora(self, planet:tsk4.Planet):
if planet.flora:
planet.add_flora(self.name,self.plant_type)
class Fauna:
def __init__(self, name):
self.name = name
class Predator(Fauna):
def __init__(self, name:str, predator_type:str, what_eats:str, lifespan:int):
super().__init__(name)
self.predator_type = predator_type
self.what_eats = what_eats
self.lifespan = lifespan
def check_planet(self, planet: tsk4.Planet):
if planet.flora and planet.fauna and not planet.humanity:
planet.add_fauna(self.name,self.predator_type)
# def check_planet(self,planet:tsk4.Planet):
# if planet.fauna and not planet.humanity:
# print('YES')
# else:
# print('NO')
class Mammal(Fauna):
def __init__(self, name, mammal_type, lifespan):
super().__init__(name)
self.mammal_type = mammal_type
self.lifespan = lifespan
def check_planet(self,planet:tsk4.Planet):
        # both 'grass' and 'wheat' must already be present in the planet's flora
        if planet.flora and planet.fauna and not planet.humanity and 'grass' in planet.flora_list["name"] and 'wheat' in planet.flora_list["name"]:
planet.add_fauna(self.name,self.mammal_type)
shark = Predator('baby shark','predator','all',20)
shark.check_planet(tsk4.friendly)
grass = Flora("grass",20,"mammal","grass")
wheat = Flora("wheat", 20, "mammal", 'wheat')
wheat.ad_flora(tsk4.friendly)
grass.ad_flora(tsk4.friendly)
print(tsk4.friendly.flora_list)
giraffe = Mammal('malwan','earth',20)
giraffe.check_planet(tsk4.friendly)
marti = Mammal('marti','earth',20)
marti.check_planet(tsk4.friendly)
print(tsk4.friendly.__dict__)
``` |
{
"source": "4yeesnice/CodeWars",
"score": 4
} |
#### File: 4yeesnice/CodeWars/Catching Car Mileage Numbers.py
```python
number = "255"
awesome_phrases = [1337,256]
def test_2(number, awesome_phrases):
if int(number) in awesome_phrases:
return 2
else:
for i in awesome_phrases:
if abs(i-int(number))<=2:
return 1
break
else:
pass
'''
|-----------------------|
|check for same number |
|-----------------------|
'''
def same_number(number):
if len(number)==2:
return 0
elif number[1:]==("0"*(len(number[1:]))):
return 2
elif len(set(number))==1:
return 2
else:
return 0
'''
|-----------------------|
|check for incrementing |
|-----------------------|
'''
def incrementing(number):
if len(number)==2:
return 0
count = 0
if number[::-1][0]=="0":
number = number[:len(number)-1]
while count!=len(number) and len(number)!=1 and len(number)!=2:
for i in number:
if count == len(number)-1 and len(number)!=1:
return 2
break
elif int(number[number.index(i)+1])-int(number[number.index(i)])==1 and len(number)!=1:
count+=1
pass
else:
break
break
'''
|-----------------------|
|check for decrementing |
|-----------------------|
'''
def decrementing(number):
if len(number)==2:
return 0
count = 0
while count!=len(number) and len(number)!=1 and len(number)!=2:
for i in number:
if count == len(number)-1:
return 2
break
elif int(number[number.index(i)])-int(number[number.index(i)+1])==1:
count+=1
pass
elif len(number)==1:
break
else:
break
break
def palindrome(number):
if len(number)==2:
return 0
if len(number)!=1:
temp=int(number)
rev=0
while(int(number)>0):
dig=int(number)%10
rev=rev*10+dig
number=int(number)//10
if(temp==rev):
return 2
else:
return 0
else:
return 0
if test_2(number, awesome_phrases)==2 or same_number(number) or palindrome(number)==2 or incrementing(number)==2 or decrementing(number)==2:
print(2)
elif test_2(str(int(number)+2), awesome_phrases)==1 or same_number(str(int(number)+2)) or incrementing(str(int(number)+2))==2 or decrementing(str(int(number)+2)) or palindrome(str(int(number)+2)):
print(1)
elif test_2(str(int(number)+1), awesome_phrases)==1 or same_number(str(int(number)+1)) or incrementing(str(int(number)+1))==2 or decrementing(str(int(number)+1)) or palindrome(str(int(number)+1)):
print(1)
else:
if len(number)==2:
print(0)
else:
print(0)
def is_incrementing(number): return str(number) in '1234567890'
def is_decrementing(number): return str(number) in '9876543210'
def is_palindrome(number): return str(number) == str(number)[::-1]
def is_round(number): return set(str(number)[1:]) == set('0')
def is_interesting(number, awesome_phrases):
tests = (is_round, is_incrementing, is_decrementing,
is_palindrome, awesome_phrases.__contains__)
for num, color in zip(range(number, number+3), (2, 1, 1)):
if num >= 100 and any(test(num) for test in tests):
return color
return 0
# NOTE: the commented-out code below is the same as the uncommented version above; the differences are that it uses "return" instead of print and wraps everything in one function containing multiple helper functions.
# def is_interesting(number, awesome_phrases):
# number = str(number)
# def test_2(number, awesome_phrases):
# if len(number)<=2:
# return 0
# if int(number) in awesome_phrases:
# return 2
# else:
# for i in awesome_phrases:
# if abs(i-int(number))<=2:
# return 1
# break
# else:
# pass
# '''
# check for same number
# '''
# def same_number(number):
# if len(number)<=2:
# return 0
# elif number[1:]==("0"*(len(number[1:]))):
# return 2
# elif len(set(number))==1:
# return 2
# else:
# return 0
# '''
# |-----------------------|
# |check for incrementing |
# |-----------------------|
# '''
# def incrementing(number):
# if len(number)<=2:
# return 0
# count = 0
# if number[::-1][0]=="0":
# number = number[:len(number)-1]
# while count!=len(number) and len(number)!=1 and len(number)!=2:
# for i in number:
# if count == len(number)-1 and len(number)!=1:
# return 2
# break
# elif int(number[number.index(i)+1])-int(number[number.index(i)])==1 and len(number)!=1:
# count+=1
# pass
# else:
# break
# break
# '''
# |-----------------------|
# |check for decrementing |
# |-----------------------|
# '''
# def decrementing(number):
# if len(number)<=2:
# return 0
# count = 0
# while count!=len(number) and len(number)!=1 and len(number)!=2:
# for i in number:
# if count == len(number)-1:
# return 2
# break
# elif int(number[number.index(i)])-int(number[number.index(i)+1])==1:
# count+=1
# pass
# elif len(number)==1:
# break
# else:
# break
# break
# def palindrome(number):
# if len(number)<=2:
# return 0
# if len(number)!=1:
# temp=int(number)
# rev=0
# while(int(number)>0):
# dig=int(number)%10
# rev=rev*10+dig
# number=int(number)//10
# if(temp==rev):
# return 2
# else:
# return 0
# else:
# return 0
# if test_2(number, awesome_phrases)==2 or same_number(number) or palindrome(number)==2 or incrementing(number)==2 or decrementing(number)==2:
# return 2
# elif test_2(str(int(number)+2), awesome_phrases)==1 or same_number(str(int(number)+2)) or incrementing(str(int(number)+2))==2 or decrementing(str(int(number)+2)) or palindrome(str(int(number)+2)):
# return 1
# elif test_2(str(int(number)+1), awesome_phrases)==1 or same_number(str(int(number)+1)) or incrementing(str(int(number)+1))==2 or decrementing(str(int(number)+1)) or palindrome(str(int(number)+1)):
# return 1
# else:
# if len(number)==2:
# return 0
# else:
# return 0
``` |
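A few illustrative calls against the concise `is_interesting` implementation above, with the outputs that follow from its rules (2 when the current mileage is interesting, 1 when an interesting number is no more than two miles ahead, 0 otherwise). These are a behaviour sketch, not the kata's official tests.
```python
awesome = [1337, 256]
print(is_interesting(3, awesome))      # 0 -- below 100, nothing interesting within two miles
print(is_interesting(1336, awesome))   # 1 -- the awesome number 1337 is one mile ahead
print(is_interesting(1337, awesome))   # 2 -- exactly on an awesome number
print(is_interesting(11209, []))       # 1 -- the palindrome 11211 is two miles ahead
print(is_interesting(98765, []))       # 2 -- sequential decrementing digits
```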
{
"source": "4YouSee-Suporte/4youseewebhook",
"score": 2
} |
#### File: webhook/base/home_view.py
```python
from django.http import HttpResponse
def home(request):
return HttpResponse('Página para uso exclusivo de 4YouSee')
```
#### File: base/templatetags/filter_extras.py
```python
import datetime
from django import template
import itertools
from num2words import num2words
register = template.Library()
@register.filter
def group_by_day(list_all_objects):
"""
    Receives a list of objects, each of which has a date-type attribute, and groups them by day.
    :param list_all_objects: Register.objects.all(): django.db.models.query.QuerySet
    :return: objects_day_ordered: [[day_1_objects][day_2_objects][day_3_objects]]: list of lists
"""
key_func = lambda x: x.date.day
objects_day_ordered = []
for key, group in itertools.groupby(list_all_objects, key_func):
objects_day_ordered.insert(0, list(group))
return objects_day_ordered
@register.filter
def num_to_word(num):
return num2words(num)
@register.filter
def date_from_minute(m):
"""
    Calculate the date/time that was `m` minutes ago and return it as a formatted string.
    :param m: 351702: int
    :return: '25 Jan 2021 15:44'
"""
if m is not None:
now = datetime.datetime.utcnow()
deltadate = now - datetime.timedelta(minutes=m)
return deltadate.strftime("%d %b %Y %H:%M")
@register.filter
def all_records(conta):
qty = len(conta.categories.all()) + len(conta.players.all()) + len(conta.playlists.all()) \
+ len(conta.medias.all()) + len(conta.records.all())
return qty
@register.filter
def player_full_name(id, conta):
try:
return (player := conta.players.get(player_id=id))
except Exception as e:
print('Error: ', e)
return f"{id}"
@register.filter
def media_full_name(id, conta):
try:
return (media := conta.medias.get(media_id=id))
except Exception as e:
print('Error: ', e)
return f"{id}"
``` |
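One caveat with `group_by_day`: `itertools.groupby` only groups *consecutive* items sharing a key, so the queryset handed to the filter needs to be ordered by date for the per-day grouping to come out right. A small standalone sketch, using plain objects in place of model instances:
```python
import itertools
from datetime import datetime
from types import SimpleNamespace

# Stand-ins for model instances with a `date` attribute, already ordered by date.
records = [
    SimpleNamespace(date=datetime(2021, 1, 25, 9)),
    SimpleNamespace(date=datetime(2021, 1, 25, 17)),
    SimpleNamespace(date=datetime(2021, 1, 26, 8)),
]

groups = []
for _, group in itertools.groupby(records, key=lambda x: x.date.day):
    groups.insert(0, list(group))   # newest day first, mirroring the template filter

print([len(g) for g in groups])     # [1, 2] -- one record on the 26th, two on the 25th
```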
{
"source": "4yub1k/captive-portal",
"score": 3
} |
#### File: portal/pages/models.py
```python
from django.db import models
# Create your models here.
class LoginData(models.Model):
username = models.CharField(max_length=100)
password = models.CharField(max_length=100)
def __str__(self):
return self.username
```
#### File: portal/pages/views.py
```python
from django.shortcuts import render
from .models import LoginData
# Create your views here.
def login(request):
return render(request,'pages/login.html')
def error(request):
return render(request,'pages/error.html')
def success(request):
if request.method=="POST":
username = request.POST["username"]
password = request.POST["password"]
login = LoginData.objects.create(username=username, password=password)
login.save()
# context ={
# 'username' : username,
# 'password' : password
# }
return render(request,'pages/success.html')
return render(request,'pages/success.html')
def error_404_view(request, exception):
return render(request,'pages/login.html')
``` |
{
"source": "4yub1k/django-blog",
"score": 2
} |
#### File: blog/app/views.py
```python
from .models import Post
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.contrib.auth.models import User
from django.utils import timezone
from django.contrib.auth.decorators import login_required
#list-dictionary
# posts = [
# {
# 'author':'Ayubi',
# 'title':'New post',
# 'content':'New technology',
# 'date':'1/1/9000'
# },
# {
# 'author':'Ayubi',
# 'title':'New post',
# 'content':'New technology',
# 'date':'1/1/9000'
# }
# ]
def home(request):
posts=Post.objects.all()
context = {
'posts':posts,
'title':'Home'
}
return render(request,'app/home.html',context)
def about(request):
return render(request,'app/about.html',{'title':'About'})
def post_(request, post):
posts=Post.objects.filter(pk=post).exists()#check if post there
if posts:
#posts=get_object_or_404(post_check, pk=post) for post doesnt exist
posts=Post.objects.get(pk=post)
context = {
'posts':posts,
'title':'Post',
}
return render(request,'users/post_.html',context)
else:
return redirect('home')
@login_required
def add(request):
if request.method == "POST":
title=request.POST['title']
content=request.POST['content']
#post_check=Post.objects.all().filter(author_id=author_id)
post_add=Post(author_id=request.user.id,title=title,content=content)
post_add.save()
messages.success(request,'Post Added !')
return redirect('home')
return render(request, 'users/addpost.html',{'title':'Post','date':timezone.now})
``` |
{
"source": "4yub1k/job-portal-django",
"score": 2
} |
#### File: job-portal-django/apply/models.py
```python
from django.db import models
from listings.models import PostJob
# Create your models here.
class ApplicantForm(models.Model):
job=models.CharField(max_length=100)
name = models.CharField(max_length=50)
email = models.EmailField(max_length=200)
mobile = models.CharField(max_length=15)
education = models.CharField(max_length=50)
exp = models.CharField(max_length=50)
resume = models.FileField(upload_to='resume/%Y/%m/%d/')
def __str__(self):
return self.name
class Review(models.Model):
name = models.ForeignKey(ApplicantForm, on_delete=models.CASCADE, null=False)
reviewd = models.BooleanField(default=False)
ratings =models.CharField(max_length=2,default=0)
remarks = models.TextField(blank=True)
def __str__(self):
return self.name.name
```
#### File: job-portal-django/apply/signals.py
```python
from django.db.models.signals import post_save
from .models import Review
from django.dispatch import receiver
from .models import ApplicantForm
@receiver(post_save, sender=ApplicantForm)
def create_review(sender, instance, created, **kwargs):
if created:
Review.objects.create(name=instance)
``` |
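One practical note on the receiver above: Django only connects it if `apply.signals` is imported when the app loads. If that wiring is not already done elsewhere in the project, the conventional place is the app config's `ready()` method — a sketch, with the config class name assumed:
```python
# apps.py -- hypothetical app config; only the "apply" app label is taken from the code above.
from django.apps import AppConfig


class ApplyConfig(AppConfig):
    name = "apply"

    def ready(self):
        import apply.signals  # noqa: F401  -- connects the post_save receiver
```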
{
"source": "4yub1k/Python-Wifi-Password",
"score": 3
} |
#### File: 4yub1k/Python-Wifi-Password/wifi-gui.py
```python
from tkinter import * #import all from tkinter
import subprocess
window=Tk()  # window.geometry("400x200")  # do not add () after the function name when passing it as a command
window.title("Wifi")#title
class wifi:
def user(self):
x_1=subprocess.Popen("netsh wlan show profiles",shell=True,stdout=subprocess.PIPE) #run IDE as admin
stdout=x_1.communicate()[0]
return stdout
def password_1(self,name):
r='1'
for pswd in name:
#remove spaces from both sides [pswd[1].strip()]
CREATE_NO_WINDOW= 0x08000000
x_1=subprocess.Popen(r'netsh wlan show profiles name="%s" key=clear' %pswd[1].strip(),shell=True,stdout=subprocess.PIPE) #run IDE as admin
#-----> Important NOTe:if there are spaces between the values then send raw strings FORMAT[r'"<type>"' %<variable>]
#----->Make sure remove unwanted space in names
#communicate() method is used to get the output from pipe
stdout=x_1.communicate()[0]
stdout=stdout.decode().splitlines()
U="u"+r #use can use loop through list also
for line in stdout:
if "Key Content" in line:
#print("Username : %s\nPassword : %s " % (pswd[1],line.split(":")[1]))
self.labels(U,r,pswd[1],line)
r=str(int(r)+1)
U1=Label(window,text="@ayuboid - <EMAIL>")
U1.grid(row=int(r),columnspan=2)
def labels(self,U,r,pswd,line):
U=Label(window,text=pswd)
U.grid(row=int(r),column=0)
U=Text(window,height=1,width=20)
U.grid(row=int(r),column=1)
U.insert(END,line.split(":")[1])
class wifi_1(wifi):
def username(self):
x_1=self.user()
name_list=[]
#Decode (binary to utf8) and then split it by lines
x_1=x_1.decode().splitlines()
#Extract the string from list
for l in x_1:
#Check For the string in given line
if "All User Profile" in l:
#Split the current line from : e-g test : OK -----> ['test','OK'] and append to list
name_list.append(l.split(":"))
return name_list
def password(self):
name_list=self.username()
name=self.password_1(name_list)
b=wifi_1()
#Defaults
U1=Label(window,text="< Wifi >")
U1.grid(row=0,column=0)
U1=Label(window,text="< Password >")
U1.grid(row=0,column=1)
#print(b.username()) GET USERNAMES
#You can also use os.system
b1=Button(window ,text="Show", command=b.password,borderwidth=4,foreground="Green")
b1.grid(row=0,column=3)
window.mainloop()
```
#### File: 4yub1k/Python-Wifi-Password/wifi.py
```python
import subprocess
class t:
def user(self):
x_1=subprocess.Popen("netsh wlan show profiles",stdout=subprocess.PIPE) #run IDE as admin
stdout=x_1.communicate()[0]
return stdout
def password_1(self,name):
for pswd in name:
#remove spaces from both sides [pswd[1].strip()]
x_1=subprocess.Popen(r'netsh wlan show profiles name="%s" key=clear' %pswd[1].strip(),stdout=subprocess.PIPE) #run IDE as admin
#-----> Important NOTe:if there are spaces between the values then send raw strings FORMAT[r'"<type>"' %<variable>]
#----->Make sure remove unwanted space in names
#communicate() method is used to get the output from pipe
stdout=x_1.communicate()[0]
stdout=stdout.decode().splitlines()
for line in stdout:
if "Key Content" in line:
print("Username : %s\nPassword : %s " % (pswd[1],line.split(":")[1]))
class t1(t):
def username(self):
x_1=self.user()
name_list=[]
#Decode (binary to utf8) and then split it by lines
x_1=x_1.decode().splitlines()
#Extract the string from list
for l in x_1:
#Check For the string in given line
if "All User Profile" in l:
#Split the current line from : e-g test : OK -----> ['test','OK'] and append to list
name_list.append(l.split(":"))
return name_list
def password(self):
name_list=self.username()
name=self.password_1(name_list)
b=t1()
b.password()
#print(b.username()) GET USERNAMES
#You can also use os.system
``` |
{
"source": "4yub1k/real-estate-project",
"score": 2
} |
#### File: real-estate-project/listings/views.py
```python
from django.shortcuts import render
from listings.models import Listing
from listings.choices import bedroom_number,price_range,state_name
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
def property_id(request, property_id):
listings=Listing.objects.get(pk=property_id)
context = {
'listing':listings,
}
return render(request,'listings/listing.html',context)
def property_search(request):
search=Listing.objects.order_by("-list_date")
if 'keyword' in request.GET:
keyword = request.GET.get('keyword')
if keyword:
search=search.filter(description__icontains=keyword)
if 'city' in request.GET:
city = request.GET.get('city')
if city:
search=search.filter(city__iexact=city)
if 'state' in request.GET:
state = request.GET.get('state')
if state:
search=search.filter(state__iexact=state)
if 'bedrooms' in request.GET:
bedrooms = request.GET.get('bedrooms')
if bedrooms:
search=search.filter(bedrooms=bedrooms)
if 'price' in request.GET:
price = request.GET.get('price')
if price:
search=search.filter(price__lte=price)
"""Paginator"""
paginator=Paginator(search,3)
page_number = request.GET.get('page')
page_property = paginator.get_page(page_number)
context = {
'listings':page_property,
'bedroom_number':bedroom_number,
'price_range':price_range,
'state_name':state_name
}
return render(request,'listings/listings.html',context)
def properties(request):
page_property=Listing.objects.order_by("-list_date")
paginator=Paginator(page_property,3) #show 3 only
page_number = request.GET.get('page')
page_property = paginator.get_page(page_number)
context = {
'listings':page_property,
'bedroom_number':bedroom_number,
'price_range':price_range,
'state_name':state_name
}
return render(request,'listings/listings.html',context)
``` |
{
"source": "5002coyrrah/DiscordBot",
"score": 3
} |
#### File: 5002coyrrah/DiscordBot/train.py
```python
import keras
import random
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.backend import reshape
from keras.utils.np_utils import to_categorical
def initBoard():
board = [
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
]
return board
def movesToBoard(moves):
board = initBoard()
for move in moves:
player = move[0]
coords = move[1]
board[coords[0]][coords[1]] = player
return board
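# Illustration of the helper above:
#   movesToBoard([(1, (0, 0)), (2, (1, 1))]) ->
#     [[1, 0, 0],
#      [0, 2, 0],
#      [0, 0, 0]]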
def getMoves(board):
moves = []
for i in range(len(board)):
for j in range(len(board[i])):
if board[i][j] == 0:
moves.append((i, j))
return moves
def getModel():
numCells = 9
outcomes = 3
model = Sequential()
model.add(Dense(200, activation='relu', input_shape=(9, )))
model.add(Dropout(0.2))
model.add(Dense(125, activation='relu'))
model.add(Dense(75, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(25, activation='relu'))
model.add(Dense(outcomes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
return model
def getWinner(board):
candidate = 0
won = 0
# Check rows
for i in range(len(board)):
candidate = 0
for j in range(len(board[i])):
# Make sure there are no gaps
if board[i][j] == 0:
break
# Identify the front-runner
if candidate == 0:
candidate = board[i][j]
# Determine whether the front-runner has all the slots
if candidate != board[i][j]:
break
elif j == len(board[i]) - 1:
won = candidate
if won > 0:
return won
# Check columns
for j in range(len(board[0])):
candidate = 0
for i in range(len(board)):
# Make sure there are no gaps
if board[i][j] == 0:
break
# Identify the front-runner
if candidate == 0:
candidate = board[i][j]
# Determine whether the front-runner has all the slots
if candidate != board[i][j]:
break
elif i == len(board) - 1:
won = candidate
if won > 0:
return won
# Check diagonals
candidate = 0
for i in range(len(board)):
if board[i][i] == 0:
break
if candidate == 0:
candidate = board[i][i]
if candidate != board[i][i]:
break
elif i == len(board) - 1:
won = candidate
if won > 0:
return won
    # Check the anti-diagonal (top-right to bottom-left)
    candidate = 0
    for i in range(len(board)):
        if board[i][2 - i] == 0:
            break
        if candidate == 0:
            candidate = board[i][2 - i]
        if candidate != board[i][2 - i]:
break
elif i == len(board) - 1:
won = candidate
if won > 0:
return won
# Still no winner?
if (len(getMoves(board)) == 0):
# It's a draw
return 0
else:
# Still more moves to make
return -1
def gamesToWinLossData(games):
X = []
y = []
for game in games:
winner = getWinner(movesToBoard(game))
for move in range(len(game)):
X.append(movesToBoard(game[:(move + 1)]))
y.append(winner)
X = np.array(X).reshape((-1, 9))
y = to_categorical(y)
# Return an appropriate train/test split
trainNum = int(len(X) * 0.8)
return (X[:trainNum], X[trainNum:], y[:trainNum], y[trainNum:])
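# NOTE: bestMove() is called by simulateGame() below whenever a trained model is
# passed in, but it is not defined anywhere in this file. The version here is a
# hedged, minimal reconstruction of what it presumably does (score every legal
# move with the model and play the most promising one; `rnd` adds a chance of a
# random move) -- it is an assumption, not the original author's implementation.
def bestMove(board, model, player, rnd=0):
    moves = getMoves(board)
    # occasionally play a random legal move for exploration
    if rnd > 0 and random.random() < rnd:
        return moves[random.randint(0, len(moves) - 1)]
    scores = []
    for move in moves:
        future = [row[:] for row in board]              # copy the board
        future[move[0]][move[1]] = player               # apply the candidate move
        prediction = model.predict(np.array(future).reshape((-1, 9)))[0]
        scores.append(prediction[player])               # model's estimate that `player` wins
    return moves[int(np.argmax(scores))]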
def simulateGame(p1=None, p2=None, rnd=0):
history = []
board = initBoard()
playerToMove = 1
while getWinner(board) == -1:
# Chose a move (random or use a player model if provided)
move = None
if playerToMove == 1 and p1 != None:
move = bestMove(board, p1, playerToMove, rnd)
elif playerToMove == 2 and p2 != None:
move = bestMove(board, p2, playerToMove, rnd)
else:
moves = getMoves(board)
move = moves[random.randint(0, len(moves) - 1)]
# Make the move
board[move[0]][move[1]] = playerToMove
# Add the move to the history
history.append((playerToMove, move))
# Switch the active player
playerToMove = 1 if playerToMove == 2 else 2
print("Simuating Games Please Stand By.....")
return history
print("Simulating Games .....")
games = [simulateGame() for _ in range(10000)]
print("Finished Simulating Games")
print("Begin Modle Compile")
print("Compiling Model .....")
model = getModel()
print("Model Compiled Successfully")
X_train, X_test, y_train, y_test = gamesToWinLossData(games)
print("Beginning Training")
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=100, batch_size=100)
print("Training Finished")
``` |
{
"source": "500errorsite/UserBot",
"score": 2
} |
#### File: userbot/modules/helper.py
```python
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot.utils import edit_or_reply, man_cmd
@man_cmd(pattern="ihelp$")
async def usit(event):
me = await event.client.get_me()
await edit_or_reply(
event,
f"**Hai {me.first_name} Kalo Anda Tidak Tau Perintah Untuk Memerintah Ku Ketik** `.help` Atau Bisa Minta Bantuan Ke:\n"
f"✣ **Group Support :** [Sharing Userbot](t.me/sharinguserbot)\n"
f"✣ **Channel Man :** [Lunatic0de](t.me/Lunatic0de)\n"
f"✣ **Owner Repo :** [Risman](t.me/mrismanaziz)\n"
f"✣ **Repo :** [SandBot](https://github.com/500errorsite/SandBot)\n",
)
@man_cmd(pattern="listvar$")
async def var(event):
await edit_or_reply(
event,
"**Daftar Lengkap Vars Dari SandBot:** [KLIK DISINI](https://telegra.ph/List-Variabel-Heroku-untuk-SandBot-09-22)",
)
CMD_HELP.update(
{
"helper": f"**Plugin : **`helper`\
\n\n • **Syntax :** `{cmd}ihelp`\
\n • **Function : **Bantuan Untuk SandBot.\
\n\n • **Syntax :** `{cmd}listvar`\
\n • **Function : **Melihat Daftar Vars.\
\n\n • **Syntax :** `{cmd}repo`\
\n • **Function : **Melihat Repository SandBot.\
\n\n • **Syntax :** `{cmd}string`\
\n • **Function : **Link untuk mengambil String SandBot.\
"
}
)
``` |
{
"source": "500-Error/weixin-SDK",
"score": 3
} |
#### File: weixin-SDK/tests/test_config.py
```python
from weixin.config import *
def test_config():
config = Config(a=0, b=1)
assert config.a == 0
assert config.b == 1
config = Config({'a': 0, 'b': 1})
assert config.a == 0
assert config.b == 1
class ConfigObject:
A = 0
B = 1
a = 9
_p = 999
config = Config()
config.from_object(ConfigObject)
assert config.A == 0
assert config.B == 1
assert config.a == 9
assert config._p == 999
config = Config()
config.from_object(ConfigObject, lower_keys=True)
    # both the upper-case A and lower-case a keys exist; the overwritten value is not checked here
assert config.b == 1
assert config._p == 999
config = Config()
config.from_json('{"a": 0, "b": 1}')
assert config.a == 0
assert config.b == 1
```
#### File: weixin-SDK/tests/test_message_parse.py
```python
from weixin.parse import *
def test_text_msg():
print('test_text_msg')
xml = """<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[text]]></MsgType>
<Content><![CDATA[你好]]></Content>
<MsgId>123456789</MsgId>
</xml>
"""
msg = WeixinMsg(xml)
assert msg.ToUserName == 'toUser'
assert msg.FromUserName == 'fromUser'
assert msg.CreateTime == '123456789'
assert msg.MsgId == '123456789'
assert msg.MsgType == 'text'
assert msg.Content == '你好'
def test_image_msg():
print('test_image_msg')
xml = """<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[image]]></MsgType>
<PicUrl><![CDATA[url]]></PicUrl>
<MediaId><![CDATA[media_id]]></MediaId>
<MsgId>123456789</MsgId>
</xml>
"""
msg = WeixinMsg(xml)
assert msg.ToUserName == 'toUser'
assert msg.FromUserName == 'fromUser'
assert msg.CreateTime == '123456789'
assert msg.MsgId == '123456789'
assert msg.MsgType == 'image'
assert msg.PicUrl == 'url'
assert msg.MediaId == 'media_id'
def test_voice_msg():
print('test_voice_msg')
xml = """<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[voice]]></MsgType>
<MediaId><![CDATA[media_id]]></MediaId>
<Format><![CDATA[Format]]></Format>
<MsgId>123456789</MsgId>
</xml>
"""
msg = WeixinMsg(xml)
assert msg.ToUserName == 'toUser'
assert msg.FromUserName == 'fromUser'
assert msg.CreateTime == '123456789'
assert msg.MsgId == '123456789'
assert msg.MsgType == 'voice'
assert msg.MediaId == 'media_id'
assert msg.Format == 'Format'
def test_video_msg():
print('test_video_msg')
xml = """<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[video]]></MsgType>
<MediaId><![CDATA[media_id]]></MediaId>
<ThumbMediaId><![CDATA[thumb_media_id]]></ThumbMediaId>
<MsgId>123456789</MsgId>
</xml>
"""
msg = WeixinMsg(xml)
assert msg.ToUserName == 'toUser'
assert msg.FromUserName == 'fromUser'
assert msg.CreateTime == '123456789'
assert msg.MsgId == '123456789'
assert msg.MsgType == 'video'
assert msg.MediaId == 'media_id'
assert msg.ThumbMediaId == 'thumb_media_id'
def test_shortvideo_msg():
print('test_shortvideo_msg')
xml = """<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[shortvideo]]></MsgType>
<MediaId><![CDATA[media_id]]></MediaId>
<ThumbMediaId><![CDATA[thumb_media_id]]></ThumbMediaId>
<MsgId>123456789</MsgId>
</xml>
"""
msg = WeixinMsg(xml)
assert msg.ToUserName == 'toUser'
assert msg.FromUserName == 'fromUser'
assert msg.CreateTime == '123456789'
assert msg.MsgId == '123456789'
assert msg.MsgType == 'shortvideo'
assert msg.MediaId == 'media_id'
assert msg.ThumbMediaId == 'thumb_media_id'
def test_location_msg():
print('test_location_msg')
xml = """<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[location]]></MsgType>
<Location_X>23.134521</Location_X>
<Location_Y>113.358803</Location_Y>
<Scale>20</Scale>
<Label><![CDATA[位置信息]]></Label>
<MsgId>123456789</MsgId>
</xml>
"""
msg = WeixinMsg(xml)
assert msg.ToUserName == 'toUser'
assert msg.FromUserName == 'fromUser'
assert msg.CreateTime == '123456789'
assert msg.MsgId == '123456789'
assert msg.MsgType == 'location'
assert msg.Location_X == '23.134521'
assert msg.Location_Y == '113.358803'
assert msg.Scale == '20'
assert msg.Label == '位置信息'
def test_link_msg():
print('test_link_msg')
xml = """<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[link]]></MsgType>
<Title><![CDATA[公众平台官网链接]]></Title>
<Description><![CDATA[公众平台官网链接]]></Description>
<Url><![CDATA[url]]></Url>
<MsgId>123456789</MsgId>
</xml>
"""
msg = WeixinMsg(xml)
assert msg.ToUserName == 'toUser'
assert msg.FromUserName == 'fromUser'
assert msg.CreateTime == '123456789'
assert msg.MsgId == '123456789'
assert msg.MsgType == 'link'
assert msg.Title == '公众平台官网链接'
assert msg.Description == '公众平台官网链接'
assert msg.Url == 'url'
if __name__ == "__main__":
test_text_msg()
test_image_msg()
test_voice_msg()
test_video_msg()
test_shortvideo_msg()
test_location_msg()
test_link_msg()
```
#### File: weixin-SDK/weixin/reply.py
```python
from .utils import get_timestamp, join_sequence
def cdata_escape(escape_s):
if escape_s is not None:
escape_s = escape_s.replace("]]>", ']]>')
escape_s = escape_s.replace("</xml>", '</xml>')
return escape_s
def _make_node(k, v):
if v : return "<{node}><![CDATA[{value}]]></{node}>".format(node=k, value=v)
    # empty string
return ""
class BaseWeixinReply(dict):
def __init__(self):
self._marked = False
def postmark(self, from_msg, created=None):
self['ToUserName'] = from_msg.FromUserName
self['FromUserName'] = from_msg.ToUserName
self['CreateTime'] = created or int(get_timestamp())
self._marked = True
def _generate(self):
raise NotImplementedError
@property
def xml(self):
# generate xml
return self._generate()
class TextReply(BaseWeixinReply):
def __init__(self, content):
super(TextReply, self).__init__()
self['Content'] = cdata_escape(content)
def _generate(self):
template = \
"<xml>" \
"<ToUserName><![CDATA[{ToUserName}]]></ToUserName>" \
"<FromUserName><![CDATA[{FromUserName}]]></FromUserName>" \
"<CreateTime>{CreateTime}</CreateTime>" \
"<MsgType><![CDATA[text]]></MsgType>" \
"<Content><![CDATA[{Content}]]></Content>" \
"</xml>"
return template.format(**self)
class ImageReply(BaseWeixinReply):
def __init__(self, media_id):
super(ImageReply, self).__init__()
self['MediaId'] = media_id
def _generate(self):
template = \
"<xml>" \
"<ToUserName><![CDATA[{ToUserName}]]></ToUserName>" \
"<FromUserName><![CDATA[{FromUserName}]]></FromUserName>" \
"<CreateTime>{CreateTime}</CreateTime>" \
"<MsgType><![CDATA[image]]></MsgType>" \
"<Image>" \
"<MediaId><![CDATA[{MediaId}]]></MediaId>" \
"</Image>" \
"</xml>"
return template.format(**self)
class VoiceReply(BaseWeixinReply):
def __init__(self, media_id):
super(VoiceReply, self).__init__()
self['MediaId'] = media_id
def _generate(self):
template = \
"<xml>" \
"<ToUserName><![CDATA[{ToUserName}]]></ToUserName>" \
"<FromUserName><![CDATA[{FromUserName}]]></FromUserName>" \
"<CreateTime>{CreateTime}</CreateTime>" \
"<MsgType><![CDATA[voice]]></MsgType>" \
"<Voice>" \
"<MediaId><![CDATA[{MediaId}]]></MediaId>" \
"</Voice>" \
"</xml>"
return template.format(**self)
class VideoReply(BaseWeixinReply):
def __init__(self, media_id, title=None, description=None):
super(VideoReply, self).__init__()
title = cdata_escape(title)
description = cdata_escape(description)
self['MediaId'] = media_id
self['TitleNode'] = _make_node("Title", title)
self['DescriptionNode'] = _make_node("Description", description)
def _generate(self):
template = \
"<xml>" \
"<ToUserName><![CDATA[{ToUserName}]]></ToUserName>" \
"<FromUserName><![CDATA[{FromUserName}]]></FromUserName>" \
"<CreateTime>{CreateTime}</CreateTime>" \
"<MsgType><![CDATA[video]]></MsgType>" \
"<Video>" \
"<MediaId><![CDATA[{MediaId}]]></MediaId>" \
"{TitleNode}{DescriptionNode}" \
"</Video>" \
"</xml>"
return template.format(**self)
class MusicReply(BaseWeixinReply):
def __init__(self, thumb_media_id, url=None, hq_url=None, title=None, description=None):
super(MusicReply, self).__init__()
title = cdata_escape(title)
description = cdata_escape(description)
self['ThumbMediaId'] = thumb_media_id
self['TitleNode'] = _make_node ("Title", title)
self['DescriptionNode'] = _make_node ("Description", description)
self['MusicUrlNode'] = _make_node ("MusicUrl", url)
self['HQMusicUrlNode'] = _make_node ("HQMusicUrl", hq_url)
def _generate(self):
template = \
"<xml>" \
"<ToUserName><![CDATA[{ToUserName}]]></ToUserName>" \
"<FromUserName><![CDATA[{FromUserName}]]></FromUserName>" \
"<CreateTime>{CreateTime}</CreateTime>" \
"<MsgType><![CDATA[music]]></MsgType>" \
"<Music>" \
"{TitleNode}{DescriptionNode}{MusicUrlNode}{HQMusicUrlNode}" \
"<ThumbMediaId><![CDATA[{ThumbMediaId}]]></ThumbMediaId>" \
"</Music>" \
"</xml>"
return template.format(**self)
class ArticleReply(BaseWeixinReply):
def __init__(self, articles=None):
super(ArticleReply, self).__init__()
self.articles = articles or []
def _generate(self):
def make_item(articles):
item = \
"<item>" \
"<Title><![CDATA[{Title}]]></Title>" \
"<Description><![CDATA[{Description}]]></Description>" \
"<PicUrl><![CDATA[{PicUrl}]]></PicUrl>" \
"<Url><![CDATA[{Url}]]></Url>" \
"</item>"
def set_default(article):
article.setdefault("Description", "")
article.setdefault("PicUrl", "")
article.setdefault("Url", "")
a_title = article['Title']
a_desc = article['Description']
article['Title'] = cdata_escape(a_title)
article['Description'] = cdata_escape(a_desc)
return article
return join_sequence(
item.format(**set_default(ar)) for ar in articles
)
self['Articles'] = make_item(self.articles)
self['Count'] = len(self.articles)
template = \
"<xml>" \
"<ToUserName><![CDATA[{ToUserName}]]></ToUserName>" \
"<FromUserName><![CDATA[{FromUserName}]]></FromUserName>" \
"<CreateTime>{CreateTime}</CreateTime>" \
"<MsgType><![CDATA[news]]></MsgType>" \
"<ArticleCount>{Count}</ArticleCount>" \
"<Articles>{Articles}</Articles>" \
"</xml>"
return template.format(**self)
def add_article(self, title, description=None, url=None, image_url=None):
ar = dict()
ar['Title'] = title
if url: ar['Url'] = url
if image_url: ar['PicUrl'] = image_url
if description: ar['Description'] = description
self.articles.append(ar)
class EncryptReply(BaseWeixinReply):
def __init__(self, enctext, nonce, timestamp, signature):
super(EncryptReply, self).__init__()
self['Encrypt'] = enctext
self['Nonce'] = nonce
self['TimeStamp'] = timestamp
self['MsgSignature'] = signature
def postmark(self, from_msg):
self._marked = True
def _generate(self):
template = \
"<xml>"\
"<Encrypt><![CDATA[{Encrypt}]]></Encrypt>" \
"<MsgSignature><![CDATA[{MsgSignature}]]></MsgSignature>" \
"<TimeStamp>{TimeStamp}</TimeStamp>" \
"<Nonce><![CDATA[{Nonce}]]></Nonce>" \
"</xml>"
return template.format(**self)
class CustomMsgReply(object):
@staticmethod
def text(openid, content):
return {
"touser": openid,
"msgtype": "text",
"text": {
"content": content
}
}
@staticmethod
def image(openid, media_id):
return {
"touser": openid,
"msgtype": "image",
"image": {
"media_id": media_id
}
}
@staticmethod
def voice(openid, media_id):
return {
"touser": openid,
"msgtype": "voice",
"voice": {
"media_id": media_id
}
}
@staticmethod
def video(openid, media_id, thumb_media_id=None, title=None, desc=None):
return {
"touser": openid,
"msgtype": "video",
"video": {
"media_id": media_id,
"thumb_media_id": thumb_media_id,
"title": title,
"description": desc
}
}
@staticmethod
def music(openid, url, hq_url, thumb_media_id, title=None, desc=None):
return {
"touser": openid,
"msgtype": "music",
"music": {
"title": title,
"description": desc,
"musicurl": url,
"hqmusicurl": hq_url,
"thumb_media_id": thumb_media_id
}
}
@staticmethod
def article(openid, articles):
return {
"touser": openid,
"msgtype": "news",
"news": {
"articles": articles
}
}
Text = TextReply
Image = ImageReply
Voice = VoiceReply
Video = VideoReply
Music = MusicReply
Article = ArticleReply
Encrypt = EncryptReply
CustomMsg = CustomMsgReply
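# --- Hedged usage sketch (not part of the original module) ---
# Building a reply to an incoming message; `incoming` stands for a WeixinMsg
# parsed from the request xml (see parse.py / request.py in this package):
#
#     reply = Text('hello')            # or Image(media_id), Article(), ...
#     reply.postmark(incoming)         # swaps To/FromUserName and stamps CreateTime
#     xml_to_return = reply.xml        # serialized <xml>...</xml> response body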
```
#### File: weixin-SDK/weixin/request.py
```python
from xml.parsers.expat import ExpatError
from .utils import AttrNone
from .session import Session
from .parse import WeixinMsg
from .reply import EncryptReply, BaseWeixinReply
class WeixinRequest(object):
def __init__(self, config, xmldoc):
self.config = config
self._raw_xml_ = xmldoc
self._response_xml_ = None
@property
def session(self):
if not hasattr(self, '_weixin_session_'):
self._weixin_session_ = Session(self)
return self._weixin_session_
@property
def message(self):
if not hasattr(self, '_weixin_msg_'):
self._weixin_msg_ = AttrNone()
if self._raw_xml_:
try:
self._weixin_msg_ = WeixinMsg(self._raw_xml_)
encrypted_msg = self._weixin_msg_.Encrypt
cryptor = self.config.cryptor
if encrypted_msg and cryptor:
                    # decrypt the encrypted Encrypt field
body = cryptor.decrypt(encrypted_msg)
del self._weixin_msg_
self._weixin_msg_ = WeixinMsg(body)
elif encrypted_msg or cryptor:
raise Exception(
"message {0}encrypted but enc_aeskey is {1}set.".format(
"" if encrypted_msg else "not ",
"" if cryptor else "not ",
)
)
except (ExpatError, KeyError):
                # malformed xml text, or xml that does not match the expected structure
raise
return self._weixin_msg_
def _build_msg(self, msg):
if isinstance(msg, BaseWeixinReply):
if not msg._marked:
msg.postmark(self.message)
if self.config.cryptor:
                # message encryption is configured, so encrypt the plaintext reply
kw = self.config.cryptor.encrypt(msg.xml)
msg = EncryptReply(**kw)
return msg
def render(self, template, *args, **kwargs):
msg = template(*args, **kwargs)
self._response_xml_ = self._build_msg(msg).xml
return
def response(self, msg):
self._response_xml_ = self._build_msg(msg).xml
def get_response_xml(self, default=None):
return self._response_xml_ or default
``` |
{
"source": "500kg/learn2branch",
"score": 2
} |
#### File: 500kg/learn2branch/03_train_competitor.py
```python
import pickle
import os
import argparse
import numpy as np
import utilities
import pathlib
from utilities import log, load_flat_samples
def load_samples(filenames, feat_type, label_type, augment, qbnorm, size_limit, logfile=None):
x, y, ncands = [], [], []
total_ncands = 0
for i, filename in enumerate(filenames):
cand_x, cand_y, best = load_flat_samples(filename, feat_type, label_type, augment, qbnorm)
x.append(cand_x)
y.append(cand_y)
ncands.append(cand_x.shape[0])
total_ncands += ncands[-1]
if (i + 1) % 100 == 0:
log(f" {i+1}/{len(filenames)} files processed ({total_ncands} candidate variables)", logfile)
if total_ncands >= size_limit:
log(f" dataset size limit reached ({size_limit} candidate variables)", logfile)
break
x = np.concatenate(x)
y = np.concatenate(y)
ncands = np.asarray(ncands)
if total_ncands > size_limit:
x = x[:size_limit]
y = y[:size_limit]
ncands[-1] -= total_ncands - size_limit
return x, y, ncands
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'problem',
help='MILP instance type to process.',
choices=['setcover', 'cauctions', 'facilities', 'indset'],
)
parser.add_argument(
'-m', '--model',
help='Model to be trained.',
type=str,
choices=['svmrank', 'extratrees', 'lambdamart'],
)
parser.add_argument(
'-s', '--seed',
help='Random generator seed.',
type=utilities.valid_seed,
default=0,
)
args = parser.parse_args()
feats_type = 'nbr_maxminmean'
problem_folders = {
'setcover': 'setcover/500r_1000c_0.05d',
'cauctions': 'cauctions/100_500',
'facilities': 'facilities/100_100_5',
'indset': 'indset/500_4',
}
problem_folder = problem_folders[args.problem]
if args.model == 'extratrees':
train_max_size = 250000
valid_max_size = 100000
feat_type = 'gcnn_agg'
feat_qbnorm = False
feat_augment = False
label_type = 'scores'
elif args.model == 'lambdamart':
train_max_size = 250000
valid_max_size = 100000
feat_type = 'khalil'
feat_qbnorm = True
feat_augment = False
label_type = 'bipartite_ranks'
elif args.model == 'svmrank':
train_max_size = 250000
valid_max_size = 100000
feat_type = 'khalil'
feat_qbnorm = True
feat_augment = True
label_type = 'bipartite_ranks'
rng = np.random.RandomState(args.seed)
running_dir = f"trained_models/{args.problem}/{args.model}_{feat_type}/{args.seed}"
os.makedirs(running_dir)
logfile = f"{running_dir}/log.txt"
log(f"Logfile for {args.model} model on {args.problem} with seed {args.seed}", logfile)
# Data loading
train_files = list(pathlib.Path(f'data/samples/{problem_folder}/train').glob('sample_*.pkl'))
valid_files = list(pathlib.Path(f'data/samples/{problem_folder}/valid').glob('sample_*.pkl'))
log(f"{len(train_files)} training files", logfile)
log(f"{len(valid_files)} validation files", logfile)
log("Loading training samples", logfile)
train_x, train_y, train_ncands = load_samples(
rng.permutation(train_files),
feat_type, label_type, feat_augment, feat_qbnorm,
train_max_size, logfile)
log(f" {train_x.shape[0]} training samples", logfile)
log("Loading validation samples", logfile)
valid_x, valid_y, valid_ncands = load_samples(
valid_files,
feat_type, label_type, feat_augment, feat_qbnorm,
valid_max_size, logfile)
log(f" {valid_x.shape[0]} validation samples", logfile)
# Data normalization
log("Normalizing datasets", logfile)
x_shift = train_x.mean(axis=0)
x_scale = train_x.std(axis=0)
x_scale[x_scale == 0] = 1
valid_x = (valid_x - x_shift) / x_scale
train_x = (train_x - x_shift) / x_scale
# Saving feature parameters
with open(f"{running_dir}/feat_specs.pkl", "wb") as file:
pickle.dump({
'type': feat_type,
'augment': feat_augment,
'qbnorm': feat_qbnorm,
}, file)
# save normalization parameters
with open(f"{running_dir}/normalization.pkl", "wb") as f:
pickle.dump((x_shift, x_scale), f)
log("Starting training", logfile)
if args.model == 'extratrees':
from sklearn.ensemble import ExtraTreesRegressor
# Training
model = ExtraTreesRegressor(
n_estimators=100,
random_state=rng,)
model.verbose = True
model.fit(train_x, train_y)
model.verbose = False
# Saving model
with open(f"{running_dir}/model.pkl", "wb") as file:
pickle.dump(model, file)
# Testing
loss = np.mean((model.predict(valid_x) - valid_y) ** 2)
log(f"Validation RMSE: {np.sqrt(loss):.2f}", logfile)
elif args.model == 'lambdamart':
import pyltr
train_qids = np.repeat(np.arange(len(train_ncands)), train_ncands)
valid_qids = np.repeat(np.arange(len(valid_ncands)), valid_ncands)
# Training
model = pyltr.models.LambdaMART(verbose=1, random_state=rng, n_estimators=500)
model.fit(train_x, train_y, train_qids,
monitor=pyltr.models.monitors.ValidationMonitor(
valid_x, valid_y, valid_qids, metric=model.metric))
# Saving model
with open(f"{running_dir}/model.pkl", "wb") as file:
pickle.dump(model, file)
# Testing
loss = model.metric.calc_mean(valid_qids, valid_y, model.predict(valid_x))
log(f"Validation log-NDCG: {np.log(loss)}", logfile)
elif args.model == 'svmrank':
import svmrank
train_qids = np.repeat(np.arange(len(train_ncands)), train_ncands)
valid_qids = np.repeat(np.arange(len(valid_ncands)), valid_ncands)
# Training (includes hyper-parameter tuning)
best_loss = np.inf
best_model = None
for c in (1e-3, 1e-2, 1e-1, 1e0):
log(f"C: {c}", logfile)
model = svmrank.Model({
'-c': c * len(train_ncands), # c_light = c_rank / n
'-v': 1,
'-y': 0,
'-l': 2,
})
model.fit(train_x, train_y, train_qids)
loss = model.loss(train_y, model(train_x, train_qids), train_qids)
log(f" training loss: {loss}", logfile)
loss = model.loss(valid_y, model(valid_x, valid_qids), valid_qids)
log(f" validation loss: {loss}", logfile)
if loss < best_loss:
best_model = model
best_loss = loss
best_c = c
# save model
model.write(f"{running_dir}/model.txt")
log(f"Best model with C={best_c}, validation loss: {best_loss}", logfile)
``` |
{
"source": "50183816/lineregression",
"score": 3
} |
#### File: lineregression/Ensemble/Adaboosting_homework.py
```python
import numpy as np
from sklearn.tree import DecisionTreeClassifier
def getsplit(x,y,d):
    # Note: the split direction is fixed here (left side predicted +1, right side -1); strictly, which side is positive should be decided from the actual labels
min_error_rate = 1
min_error_rate_index = 0
thresholdvalue = x[0]
predict = []
for i in np.arange(len(x)-1):
pos_split = list(map(lambda yv: 1 if yv<0 else 0, y[0:i+1]))
neg_split = list(map(lambda yv: 1 if yv>0 else 0, y[i+1:]))
combined = np.concatenate((pos_split,neg_split))
error_rate = np.sum(combined * d)
if(error_rate < min_error_rate):
min_error_rate = error_rate
min_error_rate_index = i
return min_error_rate,min_error_rate_index
if __name__ == '__main__':
    #1. Prepare the sample data
X= np.array([0,1,2,3,4,5,6,7,8,9]).reshape(-1,1)
Y = np.array([1,1,1,-1,-1,-1,1,1,1,-1]).reshape(-1,1)
    #2. Set the initial sample weights D
    d0 = np.ones(len(X))
    d0 = d0 / len(d0)#initial weights, set to uniform
    #3 Train the first decision tree
# tree = DecisionTreeClassifier()
# tree.fit(X,Y,sample_weight=d0)
# predicted = tree.predict(X)
# print(predicted)
    #3 Split the samples
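    # AdaBoost round, for reference (computed step by step inside the loop below):
    #   epsilon_t  = sum_i D_t(i) * 1[h_t(x_i) != y_i]            (weighted error)
    #   alpha_t    = 0.5 * ln((1 - epsilon_t) / epsilon_t)        (learner weight)
    #   D_{t+1}(i) = D_t(i) * exp(-alpha_t * y_i * h_t(x_i)) / Z_t, with Z_t the normalizer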
for r in np.arange(5):
_,splitindex=getsplit(X,Y,d0)
print('第{}轮划分的属性为{}'.format(r,splitindex))
# print(X[:,0]<=splitindex)
predict1 = np.array(Y.ravel()) #[1 if ] np.array([1,1,1,-1,-1,-1,-1,-1,-1,-1,-1])
predict1[X[:,0]<=splitindex] = 1
predict1[X[:,0]>splitindex] = -1
# print(predict1)
pred_true_values = zip(Y.ravel(),predict1,d0)
        #4. Compute epsilon, alpha and the new sample weights D1
sum = 0
for y,p,d in pred_true_values:
sum += (0 if y==p else 1)*d
# print((y,p,d))
# print('epsilon_1 = {}'.format(sum))
        alpha1 = 0.5 * np.log((1-sum)/sum)  # alpha_t = 0.5 * ln((1 - epsilon_t) / epsilon_t)
print('alpha{} = {}'.format(r+1,alpha1))
sum = 0
d1=[]
pred_true_values = zip(Y.ravel(),predict1,d0)
for y,p,d in pred_true_values:
# print((y,p,d))
d1.append(d*np.exp(-1*y*p*alpha1))
sum += d*np.exp(-1*y*p*alpha1)
# print(d1)
# print(sum)
d0 = np.array(d1)/sum
# print(d0)
# min_erro_rate,min_erro_index = getsplit(X,Y,d1)
# print((min_erro_rate,min_erro_index))
        #5. Train the next model
```
#### File: lineregression/LinearRegression/linearRegression_Resolver.py
```python
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
# Linear regression solved in closed form (normal equation)
class DirectResolverLR():
def __init__(self):
self.coef_=[]
self.intercept_=[]
    # Fit via the closed-form solution: θ = (X.T*X + λI)^-1 * X.T*Y
def fit(self,x,y,lbd):
m,_ = np.shape(x)
intercept = np.ones((m,1))
x = np.concatenate((x,intercept),axis=1)
_,n = np.shape(x)
addition = np.identity(n) * lbd
tmp = np.linalg.inv((np.dot(x.T,x)+addition))
theta = np.dot(np.dot(tmp,x.T),y)
self.coef_ = theta
return theta
def predict(self,x):
m,_ = np.shape(x)
intercept = np.ones((m, 1))
x = np.concatenate((x, intercept), axis=1)
y = np.dot(x,self.coef_)
return y
def score(self,x,y):
predy =self.predict(x)
result = r2_score(y,predy)
return result
if __name__ == '__main__':
# 1. Load data
data = pd.read_csv('../datas/household_power_consumption_1000.txt', sep=';')
# data.info()
# print(data.head())
    # 2. Get the feature attributes and target attribute x, y
x = data.iloc[:, 2:4].astype(np.float)
y = data.iloc[:, 5].astype(np.float)
# scalar = StandardScaler()
# x = scalar.fit_transform(x, y)
train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.2, random_state=2)
lr = DirectResolverLR()
theta = lr.fit(train_x,train_y,0)
print(theta)
print('训练集score=%.2f'%lr.score(train_x,train_y))
print('测试集score=%.2f' % lr.score(test_x, test_y))
t = np.arange(len(test_x))
plt.figure(facecolor='w')
plt.plot(t, test_y, 'r-', label=u'真实值')
plt.plot(t, lr.predict(test_x), 'b-', label=u'预测值')
plt.legend(loc='lower right')
plt.show()
```
#### File: lineregression/Projects/random_number_generator.py
```python
import numpy as np
import pandas as pd
# np.random.seed(22)
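# Overview of the three generators below:
#   random1(p): biased bit -- returns 0 with probability p, 1 with probability 1-p.
#   random2(p): turns that biased bit into a fair bit by drawing a second,
#               re-biased bit when the first draw is not conclusive
#               (P(0) works out to exactly 0.5 for any p in (0, 1)).
#   random3(n, p): uniform integer in 1..n built from ceil(log2(n)) fair bits,
#               rejecting values larger than n; odd n is handled by sampling
#               over 1..2n and halving.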
def random1(p):
return 0 if np.random.random() <=p else 1
def random2(p):
v = random1(p)
if p == 0.5:
return v
if p > 0.5:
if v == 1:
return v
v=random1(0.5/p)
return v
else:
if v == 0:
return v
v = random1((0.5-p)/(1-p))
return v
def random3(n,p):
if n %2 == 0:
s = int(np.ceil(np.log2(n)))
v = ''.join(map(lambda x:str(int(x)),[random2(p) for i in np.arange(s)]))
print(v)
v = int(v,2)+1
else:
n = n * 2
v = random3(n,p)
v = np.ceil(v / 2)
if v > n:
v = random3(n,p)
return v
if __name__ == '__main__':
t = [random1(0.7) for i in np.arange(100000)]
print(sum(t))
print(sum([random2(0.3) for i in np.arange(100000)]))
random3(10, 0.3)
# t = [random3(10, 0.3) for i in np.arange(10000)]
# pdata = pd.DataFrame(t,columns=['number'])
# print(pdata)
# print(pdata.groupby(by=['number'])['number'].count())
``` |
{
"source": "501Good/lexicon-enhanced-lemmatization",
"score": 3
} |
#### File: lexenlem/lemmatizers/apertium.py
```python
import apertium
from collections import OrderedDict
def lemmatize(word, lang):
analysis = apertium.analyze(lang, word)
if analysis:
lemmas = list(OrderedDict.fromkeys([x.split('<')[0] for x in str(analysis[0]).split('/')[1:]]))
else:
lemmas = []
return list(''.join(lemmas))
``` |
{
"source": "501st-alpha1/cronicle",
"score": 2
} |
#### File: 501st-alpha1/cronicle/setup.py
```python
from setuptools import setup
def coerce_file(fn):
"""Coerce file content to something useful for setuptools.setup(), turn :
.py into mock object by extracting __special__ attributes values
.md into rst text. Remove images with "[nopypi" alt text and emojis
:url: https://github.com/Kraymer/setupgoon
"""
import ast
import os
import re
import subprocess
import tempfile
import time # noqa
text = open(os.path.join(os.path.dirname(__file__), fn)).read()
if fn.endswith('.py'): # extract version, docstring etc out of python file
mock = type('mock', (object,), {})()
for attr in ('version', 'author', 'author_email', 'license', 'url'):
regex = r'^__%s__\s*=\s*[\'"]([^\'"]*)[\'"]$' % attr
m = re.search(regex, text, re.MULTILINE)
setattr(mock, attr, m.group(1) if m else None)
mock.docstring = ast.get_docstring(ast.parse(text))
if mock.version.endswith('dev'):
mock.version += str(int(time.time()))
return mock
if fn.endswith('md'): # convert md to rest on pypi package upload
text = '\n'.join([l for l in text.split('\n') if '[nopypi' not in l])
text = re.sub(r':\S+:', '', text) # no emojis
with tempfile.NamedTemporaryFile(mode='w+') as tmp:
tmp.write(text)
tmp.flush()
try:
text, stderr = subprocess.Popen(['pandoc', '-t', 'rst', tmp.name],
stdout=subprocess.PIPE).communicate()
except:
pass
try:
return text.decode('utf-8')
except AttributeError:
return text
setup(name='cronicle',
version=coerce_file('cronicle/__init__.py').version,
description=coerce_file('cronicle/__init__.py').docstring,
long_description=coerce_file('README.md'),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/KraYmer/cronicle',
license='MIT',
platforms='ALL',
packages=['cronicle', ],
entry_points={
'console_scripts': [
'cronicle = cronicle:cronicle_cli',
],
},
install_requires=coerce_file('requirements.txt').split('\n'),
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Environment :: Console',
'Topic :: System :: Filesystems',
],
keywords="cron rotate backup",
)
``` |
{
"source": "502E532E/python-quantities",
"score": 3
} |
#### File: python-quantities/quantities/decorators.py
```python
import inspect
import os
import re
import string
import sys
import warnings
from functools import partial, wraps
def memoize(f, cache={}):
@wraps(f)
def g(*args, **kwargs):
key = (f, tuple(args), frozenset(kwargs.items()))
if key not in cache:
cache[key] = f(*args, **kwargs)
return cache[key].copy()
return g
class with_doc:
"""
This decorator combines the docstrings of the provided and decorated objects
to produce the final docstring for the decorated object.
"""
def __init__(self, method, use_header=True):
self.method = method
if use_header:
self.header = \
"""
Notes
-----
"""
else:
self.header = ''
def __call__(self, new_method):
new_doc = new_method.__doc__
original_doc = self.method.__doc__
header = self.header
if original_doc and new_doc:
new_method.__doc__ = """
{}
{}
{}
""".format(original_doc, header, new_doc)
elif original_doc:
new_method.__doc__ = original_doc
return new_method
def quantitizer(base_function,
handler_function = lambda *args, **kwargs: 1.0):
"""
wraps a function so that it works properly with physical quantities
(Quantities).
arguments:
base_function - the function to be wrapped
handler_function - a function which takes the same arguments as the
base_function and returns a Quantity (or tuple of Quantities)
which has (have) the units that the output of base_function should
have.
returns:
a wrapped version of base_function that takes the same arguments
and works with physical quantities. It will have almost the same
__name__ and almost the same __doc__.
"""
from .quantity import Quantity
# define a function which will wrap the base function so that it works
# with Quantities
def wrapped_function(*args , **kwargs):
# run the arguments through the handler function, this should
# return a tuple of Quantities which have the correct units
# for the output of the function we are wrapping
handler_quantities= handler_function( *args, **kwargs)
# now we need to turn Quantities into ndarrays so they behave
# correctly
#
# first we simplify all units so that addition and subtraction work
# there may be another way to ensure this, but I do not have any good
# ideas
# in order to modify the args tuple, we have to turn it into a list
args = list(args)
#replace all the quantities in the argument list with ndarrays
for i in range(len(args)):
#test if the argument is a quantity
if isinstance(args[i], Quantity):
#convert the units to the base units
args[i] = args[i].simplified
#view the array as an ndarray
args[i] = args[i].magnitude
#convert the list back to a tuple so it can be used as an output
args = tuple (args)
        #replace all the quantities in the keyword argument
#dictionary with ndarrays
for i in kwargs:
#test if the argument is a quantity
if isinstance(kwargs[i], Quantity):
#convert the units to the base units
                kwargs[i] = kwargs[i].simplified
#view the array as an ndarray
kwargs[i] = kwargs[i].magnitude
#get the result for the function
result = base_function( *args, **kwargs)
# since we have to modify the result, convert it to a list
result = list(result)
#iterate through the handler_quantities and get the correct
# units
length = min( len(handler_quantities) , len(result) )
for i in range(length):
# if the output of the handler is a quantity make the
# output of the wrapper function be a quantity with correct
# units
if isinstance(handler_quantities[i], Quantity):
# the results should have simplified units since that's what
# the inputs were (they were simplified earlier)
# (reasons why this would not be true?)
result[i] = Quantity(
result[i],
handler_quantities[i]
.dimensionality.simplified
)
#now convert the quantity to the appropriate units
result[i] = result[i].rescale(
handler_quantities[i].dimensionality)
#need to convert the result back to a tuple
result = tuple(result)
return result
# give the wrapped function a similar name to the base function
wrapped_function.__name__ = base_function.__name__ + "_QWrap"
# give the wrapped function a similar doc string to the base function's
# doc string but add an annotation to the beginning
wrapped_function.__doc__ = (
"this function has been wrapped to work with Quantities\n"
+ base_function.__doc__)
return wrapped_function
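# --- Hedged usage sketch (not part of the original module) ---
# Wrapping a plain ndarray-based function so it accepts and returns Quantities.
# The wrapper expects both functions to return tuples; the handler only has to
# return Quantities carrying the units the outputs should have, e.g.:
#
#     import quantities as pq
#     def speed(distance, time):            # plain numeric implementation
#         return (distance / time,)
#     def speed_units(distance, time):      # declares the output units only
#         return (pq.Quantity(1.0, 'm/s'),)
#     q_speed = quantitizer(speed, speed_units)
#     q_speed(pq.Quantity(10.0, 'km'), pq.Quantity(2.0, 'h'))   # ~1.39 m/s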
``` |
{
"source": "502ping/RetrievalBased_Chatbot",
"score": 3
} |
#### File: 502ping/RetrievalBased_Chatbot/Chatbot.py
```python
from __future__ import unicode_literals
import sys
import itchat
sys.path.append("../")
import jieba.analyse
import jieba
from gensim import corpora, models, similarities
import codecs
import requests
import re
import urllib.request as ul_re
import json
import jsonpath
Train_test = 'G:/PycharmProjects/Mychatterbot/data.txt'
Traintest = codecs.open(Train_test, 'rb').readlines()
Traintest = [w.strip() for w in Traintest]
# Chinese Word tokenizing with jieba
Traintest_word = []
for word in Traintest:
words_list = [words for words in jieba.cut(word)]
Traintest_word.append(words_list)
# Accessing the question part of the corpora
Train_test_Q = 'G:/PycharmProjects/Mychatterbot/data_Q.txt'
Traintest_Q = codecs.open(Train_test_Q, 'rb').readlines()
Traintest_Q = [word.strip() for word in Traintest_Q]
# Chinese Word tokenizing with jieba
Traintest_Question = []
for Question in Traintest_Q:
Q_list = [Q for Q in jieba.cut(Question)]
Traintest_Question.append(Q_list)
# Update dictionary from corpora
dictionary = corpora.Dictionary(Traintest_Question)
# Indexing dictionary
dictionary.keys()
# Convert document into the bag-of-words (BoW) format = list of (token_id, token_count) tuples.
corpus = [dictionary.doc2bow(doc) for doc in Traintest_Question]
# The corpus is now in vector space model
tfidf_model = models.TfidfModel(corpus)
tfidf_model.save('tfidf.model')
# Retrieve response from corpus
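# Retrieval pipeline used below: user input -> jieba tokens -> doc2bow vector
# -> TF-IDF weighting -> cosine similarity against every question in the corpus
# -> return the answer attached to the most similar question.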
def retrieve_response(user_input):
doc_test = user_input
doc_test_list = [word for word in jieba.cut(doc_test)]
doc_test_vec = dictionary.doc2bow(doc_test_list)
# tfidf[corpus] to get the TF-IDF of each question
# get the similarities of each question in compare to the user input
index = similarities.SparseMatrixSimilarity(tfidf_model[corpus], num_features=len(dictionary.keys()))
sim = index[tfidf_model[doc_test_vec]]
# Sort each question with their similarities in descending order
SimilaritiesList = sorted(enumerate(sim), key=lambda item: -item[1])
num = 0
    Result_tuple = SimilaritiesList[num] # get the (index, similarity) tuple
Result_index = Result_tuple[0] # get index of the question with highest similarity to user_input
response_list = Traintest_word[Result_index] # QA response
Result_score = Result_tuple[1]
print("Similarity:" + str(Result_score))
newlist = response_list[response_list.index('\t'):] # Testing QA response result
response = ''
for res in newlist:
response += res
response = re.sub('\s', '', response)
response = response[:-3]
print("answer:" + response)
return response
def get_response(_info): # invoke the Tuling chatbot API
    print(_info)
    api_url = 'http://www.tuling123.com/openapi/api' # Tuling chatbot API url
    data = {
        'key': '<KEY>', # Tuling API key
'info': _info,
'userid': 'wechat-robot',
}
r = requests.post(api_url, data=data).json()
print(r.get('text'))
return r
def booking_flights(user_text):
global flag
global return_session
# user_text=msg['Text']
if user_text == '结束':
flag = 0
return_session = ''
else:
url = 'https://aip.baidubce.com/rpc/2.0/unit/bot/chat?access_token=24.62800c1e0d94478b487ec04859411f5b.2592000.1546348586.282335-14856895'
post_data = {
"bot_session": return_session,
"log_id": "7758521",
"request": {
"bernard_level": 0,
"client_session": "{\"client_results\":\"\", \"candidate_options\":[]}",
"query": user_text,
"query_info": {
"asr_candidates": [],
"source": "KEYBOARD",
"type": "TEXT"
},
"updates": "",
"user_id": "88888"
},
"bot_id": "17037",
"version": "2.0"
}
encoded_data = json.dumps(post_data).encode('utf-8')
headers = {'Content-Type': 'application/json'}
request = ul_re.Request(url, data=encoded_data, headers=headers)
response = ul_re.urlopen(request)
html = response.read()
# data=json.loads(html)
# data["action_list"][0]['say']
jsonobj = json.loads(html)
say = jsonpath.jsonpath(jsonobj, '$..say')
print(say)
say_convert = ''.join(say)
if say_convert == '好的,已为您定好票!':
return_session = ''
flag = 0
# itchat.send_msg(say_convert, toUserName=msg['FromUserName'])
else:
bot_session = jsonpath.jsonpath(jsonobj, '$..bot_session')
session_convert = str(bot_session)
index = session_convert.find('session_id')
index = index - 1
session_id = session_convert[index:-5]
return_session = str("{" + session_id + "}")
# itchat.send_msg(say_convert, toUserName=msg['FromUserName'])
return say_convert
@itchat.msg_register(itchat.content.TEXT)
def text_reply(msg): # AutoReply to User
global return_session
print("收到好友消息:" + msg['Text']) # msg['Text']是好友发送的消息
# msg['Text'].encode('utf-8')
check1 = "笑话"
check2 = "天气"
check3 = "机票"
global flag
# print(type(msg['Text']))
word_list = jieba.cut(msg['Text'])
for word in word_list:
if word == str(check2):
flag = 2
if word == str(check1):
return get_response(msg['Text'])['text']
if word == str(check3):
flag = 1
if flag == 1:
user_text = msg['Text']
if user_text == '结束':
flag = 0
return_session = ''
else:
return booking_flights(user_text)
elif flag == 2:
print("收到好友消息:" + msg['Text'])
if msg['Content'] == '结束':
flag = 0
else:
response = get_response(msg['Text'])['text']
for word in jieba.cut(response):
if word == '气温':
flag = 0
itchat.send_msg(str(response), toUserName=msg['FromUserName'])
elif flag == 0:
user_input = msg['Text']
return retrieve_response(user_input)
if __name__ == '__main__':
global return_session
return_session = ''
global flag
flag = 0
itchat.auto_login() # hotReload = True, keep online
itchat.run()
``` |
{
"source": "502ping/Tkinter-wxPython",
"score": 3
} |
#### File: 502ping/Tkinter-wxPython/IR.py
```python
import wx
from nltk.tokenize import RegexpTokenizer
def combine_indexes(words_list_stemmed, files_list):
index, freq_word = create_inverted_index(files_list)
sum_freq = 0
index_list = []
print(words_list_stemmed)
for term in words_list_stemmed:
if term in index.keys():
print("Term is " + str(term))
print("Index term " + str(index[term]))
index_list.append(index[term])
sum_freq = sum_freq + freq_word[term]
print("Index list " + str(index_list))
if sum_freq:
index_result = list(set.intersection(*index_list))
print("Index result is " + str(index_result))
return index_result, sum_freq
else:
return ["No results found"], 0
def parse_input(word):
word = word.strip()
if ',' in word:
words_list = word.split(',')
elif ' ' in word:
words_list = word.split(' ')
elif ';' in word:
words_list = word.split(';')
elif ':' in word:
words_list = word.split(':')
else:
words_list = [word]
return words_list
def stemming(word):
# word = PorterStemmer().stem_word(word.lower())
return word
def create_inverted_index(files_list):
# creating a dictionary of words
index = dict()
# creating frequency of the words
freq_word = dict()
# reading multiple files and tokenizing the contents of the files
for f in files_list:
file_content = open(f).read()
tokenizer = RegexpTokenizer(r'\w+')
words = tokenizer.tokenize(file_content)
# creating inverted index data structure
for word in words:
# keeping all the words in lower case
word = stemming(word)
if word not in index.keys():
index[word] = [f]
else:
index[word].append(f)
for word in index.keys():
freq_word[word] = len(index[word])
index[word] = set(index[word])
return index, freq_word
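# Shape of the structures returned above (illustrative values only):
#   index     -> {'html': {'hVDacrN0.html'}, 'apples': {'apples.txt'}, ...}   # word -> set of files containing it
#   freq_word -> {'html': 3, 'apples': 1, ...}                                # word -> total number of occurrences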
def search(term, files_list):
words_list = parse_input(term)
print("WOrds list is " + str(words_list))
words_list_stemmed = [stemming(word.strip()) for word in words_list]
index_result, sum_freq = combine_indexes(words_list_stemmed, files_list)
return index_result, sum_freq
# if __name__ == '__main__':
# files_list = ['adventur.txt', 'apples.txt', 'hVDacrN0.html']
# search('html', files_list)
MAXIMUM_ALLOWED_FILES = 6
class SecondFrame(wx.Frame):
# ----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
wx.Frame.__init__(self, None, title="Here is what we found", size=(700, 300))
self.panel = wx.Panel(self)
# results list
self.files_result = []
# SIZER
self.windowSizer = wx.BoxSizer()
self.windowSizer.Add(self.panel, 1, wx.ALL | wx.EXPAND)
self.sizer = wx.GridBagSizer(0, 0)
# Results label
self.files_location_label = wx.StaticText(self.panel, -1, "Output of Results:", (10, 1))
self.sizer.Add(self.files_location_label, (10, 10))
def get_results_from_search(self, files_with_word, freq):
        self.word_occ_label = wx.StaticText(self.panel, -1, str(freq) + " occurrences", (10, 20))
self.sizer.Add(self.word_occ_label, (10, 25))
for i, files in enumerate(files_with_word):
self.files_result.append(wx.StaticText(self.panel, -1, files, (10, 20 + (i + 1) * 20)))
self.sizer.Add(self.files_result[-1], (10, 20 + (i + 1) * 20))
class gui(wx.Frame):
def __init__(self, parent, id):
wx.Frame.__init__(self, parent, id, 'Information Retrieval System [XIA Yuchen]', size=(700, 500))
self._defaultDirectory = "/home"
self.panel = wx.ScrolledWindow(self, wx.ID_ANY)
# SIZER
self.windowSizer = wx.BoxSizer()
self.windowSizer.Add(self.panel, 1, wx.ALL | wx.EXPAND)
self.sizer = wx.GridBagSizer(0, 0)
# List of files and no of it
self.no_of_files = 1
self.fileCtrl = []
# search
self.search_label = wx.StaticText(self.panel, -1, "Search Words: ", (100, 35))
self.sizer.Add(self.search_label, (100, 35))
self.search_name = wx.TextCtrl(self.panel, pos=(100, 66), size=(260, -1))
self.sizer.Add(self.search_name, (200, 30))
# Files location
self.files_location_label = wx.StaticText(self.panel, -1, "Add documnet index:", (100, 160))
# Adding file button
self.button = wx.Button(self.panel, label="Add Files", pos=(380, 200), size=(110, -1))
self.sizer.Add(self.button, (100, 150))
self.Bind(wx.EVT_BUTTON, self.add_files_button, self.button)
self.fileCtrl.append(wx.FilePickerCtrl(self.panel, pos=(100, 200), size=(260, -1)))
self.sizer.Add(self.fileCtrl[0], (100, 200))
# Removing file button
self.button_remove = wx.Button(self.panel, label="Remove Files", pos=(510, 200), size=(110, -1))
self.Bind(wx.EVT_BUTTON, self.remove_files_button, self.button_remove)
self.sizer.Add(self.button_remove, (100, 445))
# running the program button
self.button_run = wx.Button(self.panel, label="Search", pos=(380, 63), size=(110, -1))
self.Bind(wx.EVT_BUTTON, self.run_program, self.button_run)
self.sizer.Add(self.button_run, (500, 500))
def add_files_button(self, event):
if self.no_of_files <= MAXIMUM_ALLOWED_FILES:
height = self.no_of_files * 35 + 200
self.fileCtrl.append(wx.FilePickerCtrl(self.panel, pos=(100, height), size=(260, -1)))
self.sizer.Add(self.fileCtrl[self.no_of_files], (100, height))
self.no_of_files = self.no_of_files + 1
def remove_files_button(self, event):
self.sizer.Detach(self.fileCtrl[-1])
self.fileCtrl[-1].Destroy()
del self.fileCtrl[-1]
self.no_of_files = self.no_of_files - 1
def run_program(self, event):
frame = SecondFrame()
keyword = self.search_name.GetValue()
if not keyword:
            box = wx.MessageDialog(None, 'Search Term Not Mentioned', 'Invalid Request', wx.OK)
answer = box.ShowModal()
box.Destroy()
# getting files list from the file dialog
files_list = []
for file_path in self.fileCtrl:
files_list.append(file_path.GetPath())
        files_list = list(filter(None, files_list))  # materialize so the emptiness check below works
print(files_list)
# sending the data to main.py
if files_list:
files_with_word, freq = search(keyword, files_list)
frame.get_results_from_search(files_with_word, freq)
frame.Show()
else:
box = wx.MessageDialog(None, 'Files not mentioned', 'Invalid Request', wx.OK)
answer = box.ShowModal()
box.Destroy()
if __name__ == '__main__':
app = wx.App(False)
frame = gui(parent=None, id=-1)
frame.Show()
app.MainLoop()
``` |
{
"source": "502y/Mod-Translation-Styleguide",
"score": 3
} |
#### File: Mod-Translation-Styleguide/tools/GithubKeywordQuery.py
```python
from re import compile
from time import sleep
from bs4 import BeautifulSoup
from urllib.error import HTTPError
from urllib.request import urlopen
from html import unescape
def make_url(page_num, _keyword): # query 'filename:zh_CN.lang keyword'
return 'https://github.com/search?p=' + str(
page_num) + '&q=filename%3A.zh_CN.lang+' + _keyword + '&type=Code&utf8=%E2%9C%93'
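# e.g. make_url(1, 'craft') (keyword chosen only for illustration) returns
#   'https://github.com/search?p=1&q=filename%3A.zh_CN.lang+craft&type=Code&utf8=%E2%9C%93'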
def open_url_retry(url):
try:
return urlopen(url)
except HTTPError as e:
if e.code == 429: # Too many requests
sleep(10)
print("[429 Error] Too many requests, retry in 10 secs")
return open_url_retry(url)
raise
keyword = input("Keyword: ")
i = 1
html_tags = compile(r'<.*?>')
content = BeautifulSoup(open_url_retry(make_url(i, keyword)), 'html.parser')
output_file = open('output.lang', 'w', encoding='UTF-8')
while True:
print("Page: " + str(i))
i += 1
for line in content.findAll('td', {'class': ['blob-code', 'blob-code-inner']}):
if keyword.lower() in str(line).lower() and '\\u' not in str(line):
result = unescape(html_tags.sub('', str(line))).strip()
if result:
print(result)
print(result, file=output_file)
if not content.find('a', {'class': 'next_page'}): # The next_page button of the last page is next_page.disabled
break
content = BeautifulSoup(open_url_retry(make_url(i, keyword)), 'html.parser')
output_file.close()
``` |
{
"source": "50417/gpt-2",
"score": 3
} |
#### File: gpt-2/preprocessor/restructure_mdl.py
```python
from utils import get_tokens,remove_extra_white_spaces
from model_info import model_info
from normalizer import get_normalize_block_name
from simulink_preprocess import remove_graphic_component,keep_minimum_component_in_block
import os
class Restructure_mdl():
'''
This class provides utilities to restructure the Mdl Files
'''
def __init__(self,simulink_model_name,output_dir='slflat_output'):
        '''Initialize the Restructure_mdl instance variables.
        args:
            simulink_model_name: name of the Simulink model, e.g. xyz.mdl
            output_dir: directory where the BFS-restructured Simulink model will be saved. This is used for training data.
'''
self._file = simulink_model_name
self._output_dir = output_dir
self._structure_to_extract = { 'System','Block','Line'} # Hierarchy level in mdl elements.
self._tmp_ext = '_TMP'# intermediate file that filters mdl file to only have System block { ... } . Output will be xyz_tmp.mdl
self._bfs_ext = '_bfs' # output file saved in output directory... file name is xyz_bfs.mdl
self._tmp_dir = 'slflat_tmp'
self._valid_chk_dir = "slflat_V_CHK"
if not os.path.exists(self._tmp_dir):
os.mkdir(self._tmp_dir)
if not os.path.exists(self._output_dir):
os.mkdir(self._output_dir)
if not os.path.exists(self._valid_chk_dir):
os.mkdir(self._valid_chk_dir)
def update_brace_count(self, line):
'''
keep track of brace count
args:
line: line of the file
'''
assert(self._brace_count >= 0)
self._brace_count += line.count('{')
self._brace_count -= line.count('}')
def extract_system_blk(self):
'''
        extracts the System block of the Simulink mdl file and filters out everything else.
        The structure left in the output is Model { Name toy System { Name toy Block { .....} ... }}
        It also renames the Simulink model to toy
        and updates the model_info object, which keeps track of blocks and their connections (needed for BFS restructuring).
        returns:
            filtered list of lines from the original file. Each element of the list corresponds to a line in the original file.
'''
self._brace_count = 0
_processed_output = []
stack = []
stack_pop_brace_count = 0
blk_info = ''
line_info = ''
mdl_info = model_info()
with open(self._file, 'r') as file:
for line in file:
line = remove_extra_white_spaces(line)
tokens = get_tokens(line)
self.update_brace_count(line)
#if self._brace_count==1 and stack[-1] != "Model":
#print("here")
if "Model" == tokens[0]:
stack.append("Model")
_processed_output.append(line)
while get_tokens(line)[0] != "Name":
line = remove_extra_white_spaces(next(file))
_processed_output.append("Name toy")
elif tokens[0] == "System" and stack[-1] == "Model":
stack_pop_brace_count += 1
stack.append(tokens[0])
elif tokens[0] in self._structure_to_extract and stack[-1] != "Model":
stack_pop_brace_count += 1
stack.append(tokens[0])
if stack[-1] in self._structure_to_extract:
if tokens[0] == "System":
_processed_output.append(line)
while get_tokens(line)[0] != "Name":
line = remove_extra_white_spaces(next(file))
_processed_output.append("Name toy")
while get_tokens(line)[0] != 'Block':
line = remove_extra_white_spaces(next(file))
stack.append('Block')
_processed_output.append(line)
#print(next_line)
else:
_processed_output.append(line)
if stack[-1] == "Block":
blk_info += line + "\n"
elif stack[-1] == "Line":
line_info += line + "\n"
if stack_pop_brace_count == self._brace_count:
val = stack.pop()
if val == "Block":
#print(blk_info)
mdl_info.update_blk_info(blk_info)
blk_info = ''
elif val == "Line":
mdl_info.update_line_info(line_info)
line_info = ''
stack_pop_brace_count -= 1
if not stack:
try:
while True:
next_line = remove_extra_white_spaces(next(file))
_processed_output.append(next_line)
except StopIteration:
break
elif stack[-1] == "Model":
_processed_output.append(line)
return _processed_output, mdl_info
def restructure_single_mdl(self):
'''
Entry point for restructuring. Calls functions in a sequence.
        Each function's returned value is the input parameter to the next function in the sequence.
'''
tmp_filename = self._file.split('/')[-1] .split('.')[0]+ self._tmp_ext + '.mdl'
output_filename = self._file.split('/')[-1] .split('.')[0]+ self._bfs_ext + '.mdl'
tmp_path = os.path.join(self._tmp_dir,tmp_filename)
output_path = os.path.join(self._output_dir,output_filename)
output_filename = output_filename.replace('_bfs','_vbfs')
print(output_filename)
valid_chk_path = os.path.join(self._valid_chk_dir,output_filename)
tmp_output,model_info = self.extract_system_blk()
self.save_to_file(tmp_path, tmp_output)
src, dest = model_info.get_src_dst()
source_block = list(set(src).difference(set(dest)))
output,org_norm_name_dict = self.bfs_ordering_new(source_block, model_info)
#print("\n".join(output))
output = remove_graphic_component("\n".join(output))
self.save_to_file(output_path,output,org_norm_name_dict)
#self.save_to_file(output_path, output)
bfs_valid_output = self.bfs_ordering_validation(model_info)
self.save_to_file(valid_chk_path, bfs_valid_output)
#output = keep_minimum_component_in_block("\n".join(bfs_valid_output))
#print("\n".join(output))
#print(output)
def save_to_file(self, path, tmp_output,org_norm_name_dict = None):
'''
        saves/writes the list of lines to a file.
args:
path : full path location of the file to which tmp_output is to be saved
tmp_output: list of lines . Each element of the list corresponds to the line in the original file.
org_norm_name_dict: dictionary with key : block name and value : normalized block name. Example clblk1 : a, clblk2: b and so on
'''
tmp = '\n'.join(tmp_output)
if org_norm_name_dict is not None:
for k,v in org_norm_name_dict.items():
tmp = tmp.replace(k,v)
with open(path,'w') as r:
r.write(tmp)
def bfs_ordering_validation(self,mdl_info):
'''
        converts the BFS-ordered Simulink file back into a Simulink-acceptable format, where Block {} definitions come first and Line {} definitions follow.
        Caveat: blocks with BlockType Outport have to be defined after all other block definitions, in ascending order of their port numbers,
        while blocks with BlockType Inport have to be defined at the beginning of the block definitions.
        A generated model may be missing the port number (e.g. Port "2"); in that case the port number has to be added.
        args:
            mdl_info: model_info object holding the block and connection information of the model.
returns :
list of lines where each element corresponds to the line in the processed file.
'''
blk_lst, line_lst = mdl_info.get_write_ready_blk_conn_list()
_processed_output = ["Model {", "Name toy", "System {", "Name toy"]
_processed_output += blk_lst
_processed_output += line_lst
_processed_output += ['}','}']
return _processed_output
def bfs_ordering_new(self, source_block, model_info):
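        '''
        Reorders the model by a breadth-first traversal of the block-connection graph.
        Traversal starts from the given source blocks (blocks that never appear as a destination);
        whenever some blocks remain unvisited, one of them is used as a new start point so every
        block is emitted exactly once. Each visited block's code is appended to the output together
        with the Line definitions touching it, and the block gets a normalized name (a, b, ...)
        recorded in the returned dictionary.
        args:
            source_block: list of block names acting as sources of the graph
            model_info: model_info object holding block code, line code and the connection graph
        returns:
            list of output lines and a dictionary mapping original block names to normalized names
        '''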
blk_names = [k for k in model_info.blk_info.keys()]
orig_normalized_blk_names = {}
name_counter = 1
output = ["Model {", "Name toy", "System {", "Name toy"]
unique_lines_added = set()
while len(source_block) != 0 or len(blk_names)!=0:
queue = []
if len(source_block) != 0:
queue.append(source_block[-1])
elif len(blk_names)!=0:
queue.append(blk_names[-1])
while len(queue) != 0 :
blk_visited = queue.pop(0)
if blk_visited in blk_names:
if blk_visited not in orig_normalized_blk_names:
orig_normalized_blk_names[blk_visited] = get_normalize_block_name(name_counter)
name_counter += 1
block_code = model_info.blk_info[blk_visited]
output.append(block_code) # adding block code
blk_names.remove(blk_visited)
if blk_visited in model_info.graph:
for dest_edge in model_info.graph[blk_visited]:
(dest, edge) = dest_edge
if edge not in unique_lines_added:
output.append(edge)
unique_lines_added.add(edge)
for d in dest:
if d in blk_names:
queue.append(d)
if blk_visited in model_info.graph_dest:
for src_edge in model_info.graph_dest[blk_visited]:
(src, edge) = src_edge
if edge not in unique_lines_added:
output.append(edge)
unique_lines_added.add(edge)
if src in blk_names:
queue.append(src)
if blk_visited in source_block:
source_block.remove(blk_visited)
output += ['}','}']
return output,orig_normalized_blk_names
directory = '/home/sls6964xx/Desktop/SLNET_Flat_train_compile/'  # '/home/sls6964xx/Desktop/Simulink_sample/' # '/home/sls6964xx/Desktop/RandomMOdelGeneratorInMDLFormat/slsf/reportsneo/2020-09-02-14-27-55/success/'
count = 0
for files in os.listdir(directory):
count +=1
#print(count, " : ", files)
try:
processor = Restructure_mdl(os.path.join(directory,files))
processor.restructure_single_mdl()
except UnicodeDecodeError:
continue
except Exception as e:
print(e)
print("Error Processing : ", files)
continue
#print(os.path.join(directory,files))
# x = """slforge_100840958_166_bfs
# slforge_103070060_954_bfs
# slforge_109455323_290_bfs
# slforge_115263863_639_bfs
# slforge_116820486_221_bfs
# slforge_119186634_927_bfs
# slforge_133274971_348_bfs
# slforge_148707318_395_bfs
# slforge_149709169_219_bfs
# slforge_150345404_939_bfs
# slforge_163113637_196_bfs
# slforge_163854565_175_bfs
# slforge_181710094_759_bfs
# slforge_188187512_698_bfs
# slforge_189367667_469_bfs
# slforge_196111087_602_bfs
# slforge_20237467_545_bfs
# slforge_202430615_744_bfs
# slforge_202970885_712_bfs
# slforge_20481088_646_bfs
# slforge_207048500_218_bfs
# slforge_210634286_692_bfs
# slforge_212491956_153_bfs
# slforge_236447339_998_bfs
# slforge_239740698_723_bfs
# slforge_247780768_464_bfs
# slforge_25030590_338_bfs
# slforge_253621665_313_bfs
# slforge_259681077_575_bfs
# slforge_268651500_297_bfs
# slforge_269809720_749_bfs
# slforge_272727633_483_bfs
# slforge_27291099_748_bfs
# slforge_274629558_243_bfs
# slforge_281860843_280_bfs
# slforge_288255816_887_bfs
# slforge_305175039_251_bfs
# slforge_306276746_936_bfs
# slforge_306454103_402_bfs
# slforge_320213838_893_bfs
# slforge_320532988_724_bfs
# slforge_328622669_389_bfs
# slforge_354190334_811_bfs
# slforge_362651501_129_bfs
# slforge_363057801_463_bfs
# slforge_366782490_498_bfs
# slforge_368916070_75_bfs
# slforge_370965887_65_bfs
# slforge_375168036_195_bfs
# slforge_383621934_523_bfs
# slforge_385659974_145_bfs
# slforge_386064894_762_bfs
# slforge_414313477_261_bfs
# slforge_424015527_9_bfs
# slforge_44677512_731_bfs
# slforge_452162338_45_bfs
# slforge_45290452_365_bfs
# slforge_460001758_861_bfs
# slforge_460297509_29_bfs
# slforge_460717664_482_bfs
# slforge_463180451_414_bfs
# slforge_470271830_670_bfs
# slforge_488464452_891_bfs
# slforge_502656368_883_bfs
# slforge_503756812_121_bfs
# slforge_517493243_745_bfs
# slforge_517578157_995_bfs
# slforge_51784653_268_bfs
# slforge_518191258_766_bfs
# slforge_526578300_615_bfs
# slforge_531580834_679_bfs
# slforge_533772306_845_bfs
# slforge_544547412_512_bfs
# slforge_545863988_279_bfs
# slforge_554533673_570_bfs
# slforge_562144296_260_bfs
# slforge_576520496_795_bfs
# slforge_576695423_841_bfs
# slforge_581343008_213_bfs
# slforge_58619433_442_bfs
# slforge_589959545_690_bfs
# slforge_593669717_580_bfs
# slforge_603157444_717_bfs
# slforge_604864196_15_bfs
# slforge_629532953_696_bfs
# slforge_634359218_203_bfs
# slforge_63519104_453_bfs
# slforge_638420989_746_bfs
# slforge_64389875_235_bfs
# slforge_662331999_693_bfs
# slforge_685020956_528_bfs
# slforge_685874982_502_bfs
# slforge_689075942_894_bfs
# slforge_691789268_933_bfs
# slforge_698142696_332_bfs
# slforge_703836517_185_bfs
# slforge_726593191_504_bfs
# slforge_731631655_816_bfs
# slforge_739386461_787_bfs
# slforge_741391199_439_bfs
# slforge_747157772_39_bfs
# slforge_748022232_849_bfs
# slforge_752206838_518_bfs
# slforge_771357931_193_bfs
# slforge_773677176_427_bfs
# slforge_776864605_374_bfs
# slforge_782418194_837_bfs
# slforge_788013554_446_bfs
# slforge_795035487_20_bfs
# slforge_795178056_607_bfs
# slforge_795376787_738_bfs
# slforge_796142499_387_bfs
# slforge_796506880_323_bfs
# slforge_800377751_363_bfs
# slforge_800722030_943_bfs
# slforge_808528605_390_bfs
# slforge_816008054_325_bfs
# slforge_820199596_871_bfs
# slforge_823583653_826_bfs
# slforge_82728797_895_bfs
# slforge_838424996_721_bfs
# slforge_841811023_124_bfs
# slforge_845724132_94_bfs
# slforge_847146610_207_bfs
# slforge_855889869_122_bfs
# slforge_865938663_515_bfs
# slforge_867700888_451_bfs
# slforge_874257686_92_bfs
# slforge_879183608_282_bfs
# slforge_884675872_52_bfs
# slforge_890466769_84_bfs
# slforge_901896146_472_bfs
# slforge_903761901_741_bfs
# slforge_909796666_966_bfs
# slforge_915089740_278_bfs
# slforge_918899334_586_bfs
# slforge_919149785_173_bfs
# slforge_927035214_407_bfs
# slforge_942299490_660_bfs
# slforge_946325154_115_bfs
# slforge_947663464_852_bfs
# slforge_948753605_228_bfs
# slforge_95252572_830_bfs
# slforge_968765260_413_bfs
# slforge_975968524_772_bfs
# slforge_983568254_694_bfs
# slforge_993417501_353_bfs"""
# x = """slforge_105895491_798_bfs
# slforge_252468730_872_bfs
# slforge_344149598_657_bfs
# slforge_551054484_629_bfs
# slforge_566434247_50_bfs
# slforge_670967089_538_bfs
# slforge_733825341_495_bfs
# slforge_819207223_555_bfs
# slforge_888958867_636_bfs"""
# for k in x.split('\n'):
# processor = Restructure_mdl('/home/sls6964xx/Documents/GPT2/gpt-2/preprocessor/output/'+k+'.mdl')#slforge_946325154_115_bfs.mdl')#('/home/sls6964xx/Desktop/RandomMOdelGeneratorInMDLFormat/slsf/reportsneo/2020-09-02-14-27-55/success/slforge_598683771_989.mdl')
# processor.restructure_single_mdl()
# processor = Restructure_mdl('/home/sls6964xx/Documents/GPT2/gpt-2/src/sample.mdl')#slforge_946325154_115_bfs.mdl')#('/home/sls6964xx/Desktop/RandomMOdelGeneratorInMDLFormat/slsf/reportsneo/2020-09-02-14-27-55/success/slforge_598683771_989.mdl')
# processor.restructure_single_mdl()
# processor = Restructure_mdl('/home/sls6964xx/Desktop/RandomMOdelGeneratorInMDLFormat/slsf/reportsneo/2020-09-02-14-27-55/success/slforge_598683771_989.mdl')
# processor.restructure_single_mdl()
```
#### File: gpt-2/preprocessor/simulink_preprocess.py
```python
from typing import List
from utils import get_tokens
def remove_graphic_component(text: str) -> List:
'''
    removes components that are not necessary for the compilation of a Simulink model. These components are auto-generated when the model is saved in Simulink.
args:
text: Simulink model file.
returns :
        list of lines, with the lines containing any entry of remove_list filtered out
'''
lines = []
remove_list = ["Position","ZOrder","SID","Points"]
for line in text.split("\n"):
line = line.lstrip()
if line.startswith(tuple(remove_list)) or len(line)==0:
continue
lines.append(line)
return lines
def keep_minimum_component_in_block(text: str) -> List:
lines = []
add_block_comp_list = ["BlockType","Name","Ports","SourceBlock","SourceType"]
brace_count = 0
for line in text.split("\n"):
line = line.strip()
if len(line) == 0:
continue
tok = get_tokens(line)
if "Block" in tok and "{" in tok:
brace_count = 1
lines.append(line)
elif "}" in tok:
brace_count = max(0, brace_count - 1)
if brace_count != 0:
lines.append(line)
if brace_count == 0:
lines.append(line)
else:
if line.startswith(tuple(add_block_comp_list)):
lines.append(line)
return lines
# import os
# directory = '/home/sls6964xx/Documents/GPT2/gpt-2/preprocessor/output'
# count = 1
# if not os.path.exists("Minimum"):
# os.mkdir("Minimum")
# for files in os.listdir(directory):
# count +=1
# print(count, " : ", files)
# with open(directory + "/" + files,"r") as file:
# output = keep_minimum_component_in_block(file.read())
# tmp_path = os.path.join("Minimum", files)
# with open(tmp_path, 'w') as r:
# r.write("\n".join(output))
```
#### File: gpt-2/src/interactive_conditional_samples.py
```python
import fire
import json
import os
import numpy as np
import tensorflow as tf
import model, sample, encoder
def interact_model(
model_name='117M',
seed=None,
nsamples=1000,
batch_size=1,
length=None,
temperature=1,
top_k=0,
top_p=0.0
):
"""
Interactively run the model
:model_name=117M : String, which model to use
:seed=None : Integer seed for random number generators, fix seed to reproduce
results
    :nsamples=1000 : Number of samples to return total
:batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.
:length=None : Number of tokens in generated text, if None (default), is
determined by model hyperparameters
:temperature=1 : Float value controlling randomness in boltzmann
distribution. Lower temperature results in less random completions. As the
temperature approaches zero, the model will become deterministic and
repetitive. Higher temperature results in more random completions.
:top_k=0 : Integer value controlling diversity. 1 means only 1 word is
considered for each step (token), resulting in deterministic completions,
while 40 means 40 words are considered at each step. 0 (default) is a
special setting meaning no restrictions. 40 generally is a good value.
:top_p=0.0 : Float value controlling diversity. Implements nucleus sampling,
overriding top_k if set to a value > 0. A good setting is 0.9.
"""
if batch_size is None:
batch_size = 1
assert nsamples % batch_size == 0
enc = encoder.get_encoder(model_name)
hparams = model.default_hparams()
with open(os.path.join('models', model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
if length is None:
length = hparams.n_ctx // 2
print(length)
#elif length > hparams.n_ctx:
# raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
#config = tf.ConfigProto(device_count={'GPU': 0})
config = tf.ConfigProto()
with tf.Session(graph=tf.Graph(),config=config) as sess:
context = tf.placeholder(tf.int32, [batch_size, None])
np.random.seed(seed)
tf.set_random_seed(seed)
raw_text = """Model {"""
#input("Model prompt >>> ")
context_tokens = enc.encode(raw_text)
output = sample.sample_sequence(
hparams=hparams, length=length,
context=context,
batch_size=batch_size,
temperature=temperature, top_k=top_k, top_p=top_p
)
saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint(os.path.join('models', model_name))
saver.restore(sess, ckpt)
from datetime import datetime
#while True:
generated = 0
import time
grand_start = time.time()
for cnt in range(nsamples // batch_size):
start_per_sample = time.time()
output_text = raw_text
text = raw_text
context_tokens = enc.encode(text)
#raw_text = input("Model prompt >>> ")
# while not raw_text:
# print('Prompt should not be empty!')
# raw_text = input("Model prompt >>> ")
#print(context_tokens)
#file_to_save.write(raw_text)
#for cnt in range(nsamples // batch_size):
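            # Sample in chunks, feeding the newest chunk back in as context, until the model emits
            # the <|endoftext|> token; everything before that token is kept as the sample.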
while "<|endoftext|>" not in text:
out = sess.run(output, feed_dict={context: [context_tokens for _ in range(batch_size)]})[:,
len(context_tokens):]
for i in range(batch_size):
#generated += 1
text = enc.decode(out[i])
if "<|endoftext|>" in text:
sep = "<|endoftext|>"
rest = text.split(sep, 1)[0]
output_text += rest
break
context_tokens = enc.encode(text)
output_text += text
print("=" * 40 + " SAMPLE " + str(cnt+12) + " " + "=" * 40)
minutes, seconds = divmod(time.time() - start_per_sample, 60)
print("Output Done : {:0>2}:{:05.2f}".format(int(minutes),seconds) )
print("=" * 80)
with open("Simulink_sample/sample__"+str(cnt+12)+".mdl","w+") as f:
f.write(output_text)
elapsed_total = time.time()-grand_start
hours, rem = divmod(elapsed_total,3600)
minutes, seconds = divmod(rem, 60)
print("Total time to generate 1000 samples :{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
if __name__ == '__main__':
fire.Fire(interact_model)
``` |
{
"source": "50417/SLGPT",
"score": 3
} |
#### File: SLGPT/code-process/Graph_Info.py
```python
import math
class Graph_Info():
def __init__(self , name):
self.simulink_name = name
#self.source_destination_path = []
#self.subgraphs = []
self.subgraphs_size = []
self.no_of_subgraphs = 0
self.source_destination_path = False
self.max_source_destination_path_length = 0
self.min_source_destination_path_length = math.inf
self.max_sub_graph_size = 0
def addpath(self,path):
#self.source_destination_path.append(path)
self.source_destination_path = True
self.max_source_destination_path_length = max(len(path),self.max_source_destination_path_length)
self.min_source_destination_path_length = min(len(path),self.min_source_destination_path_length)
def add_subgraph(self,sg):
if (len(sg) != 0):
self.no_of_subgraphs+=1
#self.subgraphs.append(sg)
self.max_sub_graph_size = max(self.max_sub_graph_size,len(sg))
self.subgraphs_size.append(len(sg))
``` |
{
"source": "505788057/RoboticArm-Ball",
"score": 3
} |
#### File: 505788057/RoboticArm-Ball/CaliPlot.py
```python
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
def list2array(listx, listy, listz, timelist):
time = np.array(timelist)
x = np.array(listx)
y = np.array(listy)
z = np.array(listz)
tem_time = timelist[0] % 10000
minus_time = timelist[0] - tem_time
# Least squares
Least_x = np.polyfit(time - minus_time, x, 1)
equation_x = np.poly1d(Least_x)
Least_z = np.polyfit(time - minus_time, z, 1)
equation_z = np.poly1d(Least_z)
Least_y = np.polyfit(time - minus_time, y, 2)
equation_y = np.poly1d(Least_y)
# or use yvals=np.polyval(z1,x)
fit_x = equation_x(time - minus_time)
fit_z = equation_z(time - minus_time)
fit_y = equation_y(time - minus_time)
# # 2D
plot1 = plt.plot(time - minus_time, z, '*', label='original values')
plot2 = plt.plot(time - minus_time, fit_z, 'r', label='polyfit values')
plt.xlabel('x axis')
plt.ylabel('y axis')
    plt.legend(loc=4)  # loc=4 puts the legend in the lower-right corner; see help(plt.legend) for the other options
plt.title('polyfitting')
# plt.show()
plt.savefig('fig.png')
    print('Calibration finished.')
# 3D
# mpl.rcParams['legend.fontsize'] = 10
# fig = plt.figure()
# ax = fig.gca(projection='3d')
# ax.plot(x, z, y, label='parametric curve')
# ax.plot(fit_x, fit_z, fit_y, label='parametric curve')
#
# ax.set_ylabel('Z')
# ax.set_zlabel('Y')
# ax.set_xlabel('X')
# ax.legend()
# plt.show()
if __name__ == '__main__':
# transfer them into array
# tennis1.svo
# timelist = [1590370805.462524, 1590370805.5231707, 1590370805.5813963, 1590370805.6406145, 1590370805.698655, 1590370805.755522, 1590370805.811019, 1590370805.8673904, 1590370805.9244144, 1590370805.9823272, 1590370806.042043, 1590370806.1024814, 1590370806.1583822, 1590370806.2692547, 1590370806.3247852, 1590370806.382588, 1590370806.438314, 1590370806.4938543, 1590370806.548817, 1590370806.6595979, 1590370806.7155735, 1590370806.7721534]
# listx = [1.378333568572998, 2.6016061, 2.8438501, 2.8486598, 2.770108222961426, 2.4207764, 2.3422040939331055, 2.324617862701416, 2.1079476, 1.7550919, 1.5248140096664429, 1.2089921236038208, 0.9462942, 0.28882295, 0.16508897, -0.3907056450843811, -0.4709196, -0.9288353, -1.448897123336792, -1.7345365, -2.0495212, -0.8052868843078613]
# listy = [-0.05165481939911842, -0.26656777, -0.51725614, -0.81832075, -0.9825394153594971, -1.1084504, -1.2568608522415161, -1.479618787765503, -1.5843353, -1.6481928, -1.700961947441101, -1.6539585590362549, -1.6358579, -1.41463, -1.3329362, -1.1728086471557617, -0.9336073, -0.6354038, -0.2573710083961487, 0.43543887, 0.78495455, 0.3775915503501892]
# listz = np.array([1.4929945468902588, 2.8918116, 3.2883322, 3.6273456, 3.683410167694092, 3.5859275, 3.746155261993408, 3.9522814750671387, 4.021757, 3.9577022, 3.9235706329345703, 3.7479872703552246, 3.6744277, 4.0746527, 4.0460835, 4.078764915466309, 3.984223, 3.8409414, 3.635684013366699, 2.8342047, 2.5602126, 0.8657321929931641])
# tesnnis2.svo
listx = [2.9003219604492188, 2.914999, 2.7897768020629883, 2.664806842803955, 2.509872, 2.502492, 2.257084608078003,
2.12467622756958, 2.026886224746704, 1.8217148, 1.6705565, 1.4125998, 0.7843540906906128,
0.6815392374992371,
0.6116997, 0.51989925, 0.4470337927341461, 0.39395439624786377, 0.3130735158920288, 0.23555606603622437,
0.1817034,
0.1342813, 0.04152153059840202, -0.08397220075130463, -0.60927105, -0.7687373757362366,
-0.9387397766113281,
-1.0574983, -1.0280832, -1.2355990409851074, -1.3635989427566528, -1.3623799]
listy = [0.43417564034461975, 0.16162421, -0.11754219233989716, -0.38563716411590576, -0.63429135, -0.9109296,
-1.102527379989624, -1.2968493700027466, -1.4311916828155518, -1.5778172, -1.6967598, -1.7303532,
-0.9435155391693115, -0.9681894779205322, -0.9778852, -1.0099608, -1.005697250366211, -1.001267910003662,
-0.9756706953048706, -0.9305459260940552, -0.90242606, -0.8221237, -0.7837788462638855,
-0.7024073004722595,
-0.72066426, -0.44028931856155396, -0.14642766118049622, 0.18590547, 0.43050718, 0.7862274050712585,
1.108873963356018, 1.3778218]
listz = [3.974457025527954, 4.2234783, 4.310595512390137, 4.331752777099609, 4.395141, 4.5437713, 4.517788887023926,
4.620116233825684, 4.445725440979004, 4.5253596, 4.4999332, 4.336791, 2.3845386505126953,
2.364236354827881,
2.350337, 2.3552065, 2.3541321754455566, 2.387808322906494, 2.365471363067627, 2.3478844165802, 2.3347933,
2.3187907, 2.2945680618286133, 2.336951732635498, 4.8546853, 4.71743106842041, 4.753421306610107,
4.6235576,
4.0115194, 4.3189191818237305, 4.19636344909668, 3.7759159]
timelist = [1590734852.1242003, 1590734852.1807272, 1590734852.234512, 1590734852.2898612, 1590734852.3444474,
1590734852.3990796, 1590734852.4539366, 1590734852.508885, 1590734852.564926, 1590734852.6204963,
1590734852.6761467, 1590734852.730033, 1590734852.7859554, 1590734852.841079, 1590734852.8953316,
1590734852.9507463, 1590734853.0056183, 1590734853.060884, 1590734853.116235, 1590734853.1715913,
1590734853.2258458, 1590734853.2809885, 1590734853.3365073, 1590734853.3915794, 1590734853.5579169,
1590734853.6116655, 1590734853.6685421, 1590734853.723139, 1590734853.7786517, 1590734853.8343437,
1590734853.8984737, 1590734853.9518814]
# tennis3.svo
# listx = [0.3172745, 0.30599847435951233, 0.2955, 0.2935665, 0.28753167390823364, 0.28932345, 0.2934848368167877,
# 0.296322762966156, 0.2889719307422638, 0.28359633684158325, 0.2756395637989044, 0.25924360752105713,
# 0.24964043, 0.22924330830574036, 0.21541204, 0.2042052447795868, 0.17592868208885193, 0.16845292,
# 0.13332465291023254, 0.12662326, 0.11186858266592026, 0.08200584352016449, 0.03798733651638031,
# -0.0019563164096325636, -0.010218276, -0.02275446429848671, -0.030307572335004807, -0.06421846151351929,
# -0.089726634, -0.09471378, -0.11786812, -0.1374852, -0.1560058444738388, -0.16710165, -0.21381166577339172,
# -0.8069354891777039, -1.0064988, -1.2963908910751343, -1.2578022480010986, -1.8013108968734741]
# listy = [0.5728218, 0.5843399167060852, 0.60232395, 0.60962135, 0.6122140884399414, 0.6264883, 0.6467896103858948,
# 0.6480776071548462, 0.6529171466827393, 0.6521287560462952, 0.6550891399383545, 0.6397219896316528,
# 0.5920477, 0.5105807781219482, 0.37324274, 0.2506449818611145, 0.12893235683441162, 0.0034519485,
# -0.1891399621963501, -0.32670522, -0.513620913028717, -0.6497299671173096, -0.7802656888961792,
# -0.9179131984710693, -0.527315, -0.5445464849472046, -0.5472044944763184, -0.5542003512382507, -0.5442121,
# -0.52128863, -0.5026254, -0.4614783, -0.41056010127067566, -0.3533057, -0.2842305302619934,
# -0.5402709245681763, -0.15292263, 0.38480105996131897, 0.9864322543144226, 1.2156903743743896]
# listz = [3.7283587, 3.696535587310791, 3.6984868, 3.6879237, 3.69278883934021, 3.6998756, 3.6971168518066406,
# 3.6943867206573486, 3.693394184112549, 3.665677070617676, 3.6571907997131348, 3.6156864166259766,
# 3.5504797, 3.4731931686401367, 3.410746, 3.533870220184326, 3.503570079803467, 3.674098, 4.085834503173828,
# 3.9624631, 4.301963806152344, 4.330367088317871, 4.097470283508301, 4.188103199005127, 2.422814,
# 2.327251672744751, 2.2162179946899414, 2.0744686126708984, 1.9716586, 1.8658121, 1.7759119, 1.6916102,
# 1.5570632219314575, 1.4853101, 1.3855901956558228, 4.590245246887207, 4.931698, 4.860433101654053,
# 4.329430103302002, 3.885613441467285]
# timelist = [1590723152.54412, 1590723152.599881, 1590723152.6565528, 1590723152.7145152, 1590723152.7717779,
# 1590723152.8289044, 1590723152.8841283, 1590723152.9413185, 1590723152.9963236, 1590723153.051572,
# 1590723153.1083086, 1590723153.1644797, 1590723153.2196696, 1590723153.2749991, 1590723153.3315353,
# 1590723153.388707, 1590723153.4455416, 1590723153.5003061, 1590723153.5613267, 1590723153.617249,
# 1590723153.674275, 1590723153.7310069, 1590723153.7860005, 1590723153.8424776, 1590723153.8966901,
# 1590723153.9523175, 1590723154.0071263, 1590723154.06228, 1590723154.1192317, 1590723154.174128,
# 1590723154.2299175, 1590723154.2853584, 1590723154.3413773, 1590723154.3956435, 1590723154.4522626,
# 1590723154.507029, 1590723154.5625625, 1590723154.621313, 1590723154.6772866, 1590723154.7329192]
list2array(listx, listy, listz, timelist)
```
#### File: 505788057/RoboticArm-Ball/object_zed.py
```python
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tensorflow as tf
import collections
import statistics
import math
import tarfile
import os.path
import CaliPlot
from threading import Lock, Thread
from time import sleep
import time
import cv2
# ZED imports
import pyzed.sl as sl
sys.path.append('utils')
# ## Object detection imports
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
def load_image_into_numpy_array(image):
ar = image.get_data()
ar = ar[:, :, 0:3]
(im_height, im_width, channels) = image.get_data().shape
return np.array(ar).reshape((im_height, im_width, 3)).astype(np.uint8)
def load_depth_into_numpy_array(depth):
ar = depth.get_data()
ar = ar[:, :, 0:4]
(im_height, im_width, channels) = depth.get_data().shape
return np.array(ar).reshape((im_height, im_width, channels)).astype(np.float32)
lock = Lock()
# width = 704
# height = 416
width = 1280
height = 720
# width = 672
# height = 376
confidence = 0.55
image_np_global = np.zeros([width, height, 3], dtype=np.uint8)
depth_np_global = np.zeros([width, height, 4], dtype=np.float)
exit_signal = False
new_data = False
# ZED image capture thread function
def capture_thread_func(svo_filepath=None):
global image_np_global, depth_np_global, exit_signal, new_data
zed = sl.Camera()
# Create a InitParameters object and set configuration parameters
input_type = sl.InputType()
if svo_filepath is not None:
input_type.set_from_svo_file(svo_filepath)
init_params = sl.InitParameters(input_t=input_type)
init_params.camera_resolution = sl.RESOLUTION.HD720
init_params.camera_fps = 60
# init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE
init_params.depth_mode = sl.DEPTH_MODE.ULTRA
init_params.coordinate_units = sl.UNIT.METER
init_params.svo_real_time_mode = False
# Open the camera
err = zed.open(init_params)
print(err)
while err != sl.ERROR_CODE.SUCCESS:
err = zed.open(init_params)
print(err)
sleep(1)
image_mat = sl.Mat()
depth_mat = sl.Mat()
runtime_parameters = sl.RuntimeParameters()
image_size = sl.Resolution(width, height)
while not exit_signal:
if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
zed.retrieve_image(image_mat, sl.VIEW.LEFT, resolution=image_size)
zed.retrieve_measure(depth_mat, sl.MEASURE.XYZRGBA, resolution=image_size)
lock.acquire()
image_np_global = load_image_into_numpy_array(image_mat)
depth_np_global = load_depth_into_numpy_array(depth_mat)
new_data = True
lock.release()
sleep(0.01)
zed.close()
xlist = []
ylist = []
zlist = []
timelist = []
def display_objects_distances(image_np, depth_np, num_detections, boxes_, classes_, scores_, category_index):
global xlist, ylist, zlist
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
research_distance_box = 20
for i in range(num_detections):
if scores_[i] > confidence:
box = tuple(boxes_[i].tolist())
if classes_[i] in category_index.keys():
class_name = category_index[classes_[i]]['name']
# display_str = str(class_name)
# if not display_str:
# display_str = '{}%'.format(int(100 * scores_[i]))
# else:
# display_str = '{}: {}%'.format(display_str, int(100 * scores_[i]))
# Find object distance
# print(box)
ymin, xmin, ymax, xmax = box
x_center = int(xmin * width + (xmax - xmin) * width * 0.5)
y_center = int(ymin * height + (ymax - ymin) * height * 0.5)
x_vect = []
y_vect = []
z_vect = []
min_y_r = max(int(ymin * height), int(y_center - research_distance_box))
min_x_r = max(int(xmin * width), int(x_center - research_distance_box))
max_y_r = min(int(ymax * height), int(y_center + research_distance_box))
max_x_r = min(int(xmax * width), int(x_center + research_distance_box))
# print(max_y_r - min_y_r)
# print(max_x_r - min_x_r)
if min_y_r < 0: min_y_r = 0
if min_x_r < 0: min_x_r = 0
if max_y_r > height: max_y_r = height
if max_x_r > width: max_x_r = width
# x_vect.append(depth_np[y_center,x_center, 0])
# y_vect.append(depth_np[y_center,x_center, 1])
# z_vect.append(depth_np[y_center,x_center, 2])
for j_ in range(min_y_r, max_y_r):
for i_ in range(min_x_r, max_x_r):
# print(depth_np)
z = depth_np[j_, i_, 2]
                        # keep the point only if z is neither NaN nor infinite
if not np.isnan(z) and not np.isinf(z):
# print(depth_np[j_, i_, 0])
# print(depth_np[j_, i_, 1])
# print(z)
x_vect.append(depth_np[j_, i_, 0])
y_vect.append(depth_np[j_, i_, 1])
z_vect.append(z)
if len(x_vect) > 0:
x = statistics.median(x_vect)
y = statistics.median(y_vect)
z = statistics.median(z_vect)
xlist.append(x)
ylist.append(y)
zlist.append(z)
timelist.append(time.time())
distance = math.sqrt(x * x + y * y + z * z)
display_str = 'x:' + str('% 4.3f' % x) + ' y:' + str('% 4.3f' % y) + ' z:' + str('% 4.3f' % z)
display_str = display_str + " " + str('% 6.2f' % distance) + " m "
print(display_str)
box_to_display_str_map[box].append(display_str)
box_to_color_map[box] = vis_util.STANDARD_COLORS[classes_[i] % len(vis_util.STANDARD_COLORS)]
for box, color in box_to_color_map.items():
ymin, xmin, ymax, xmax = box
vis_util.draw_bounding_box_on_image_array(
image_np,
ymin,
xmin,
ymax,
xmax,
color=color,
thickness=4,
display_str_list=box_to_display_str_map[box],
use_normalized_coordinates=True)
return image_np
def main(args):
global xlist, ylist, zlist, timelist
svo_filepath = None
if len(args) > 1:
svo_filepath = args[1]
# This main thread will run the object detection, the capture thread is loaded later
# What model to download and load
MODEL_NAME = 'fast_rcnn_inception_v2_coco_1000'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_FROZEN_GRAPH = 'data/' + MODEL_NAME + '/frozen_inference_graph.pb'
# Check if the model is already present
if not os.path.isfile(PATH_TO_FROZEN_GRAPH):
print("The model " + MODEL_NAME + " is not exit.")
print('Please check the .pb file path.')
os._exit(0)
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'tennis_label_map.pbtxt')
NUM_CLASSES = 1
# Start the capture thread with the ZED input
print("Starting the ZED")
capture_thread = Thread(target=capture_thread_func, kwargs={'svo_filepath': svo_filepath})
capture_thread.start()
# Shared resources
global image_np_global, depth_np_global, new_data, exit_signal
# Load a (frozen) Tensorflow model into memory.
print("Loading model " + PATH_TO_FROZEN_GRAPH + '\n')
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
    # Limit the GPU memory usage taken by TF to at most 60% https://www.tensorflow.org/guide/using_gpu
config = tf.compat.v1.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.6
# Loading label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Detection
with detection_graph.as_default():
with tf.compat.v1.Session(config=config, graph=detection_graph) as sess:
while not exit_signal:
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
if new_data:
lock.acquire()
image_np = np.copy(image_np_global)
depth_np = np.copy(depth_np_global)
new_data = False
lock.release()
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
num_detections_ = num_detections.astype(int)[0]
if num_detections_> 0 :
# Visualization of the results of a detection.
image_np = display_objects_distances(
image_np,
depth_np,
num_detections_,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index)
else:
print('Do not detect.')
cv2.imshow('ZED object detection', cv2.resize(image_np, (width, height)))
# sleep(1)
if cv2.waitKey(10) & 0xFF == ord('q'):
cv2.destroyAllWindows()
exit_signal = True
else:
sleep(0.01)
sess.close()
exit_signal = True
# CaliPlot.list2array(xlist, ylist, zlist, timelist)
# filename = 'write_data.txt'
    # with open(filename, 'a') as f:  # created automatically if filename does not exist; mode 'w' would clear the existing contents before writing
# f.write(str(xlist) + '\n')
# f.write(str(ylist) + '\n')
# f.write(str(zlist) + '\n')
# f.write(str(timelist) + '\n')
capture_thread.join()
if __name__ == '__main__':
main(sys.argv)
``` |
{
"source": "507C/Maxwell-Demon-Game",
"score": 4
} |
#### File: 507C/Maxwell-Demon-Game/BallsAndWalls.py
```python
import taichi as ti
from Rigid2dBody import Circles, AABBs
from random import random
import math
__author__ = "507C"
__credits__ = ["507C", "Y7K4"]
__version__ = "1.0.1"
__maintainer__ = "507C"
'''
2d balls for the Maxwell's demon game. Each object represents a group of balls.
'''
@ti.data_oriented
class Balls(Circles):
def __init__(self, N, mass, pos_x, pos_y, theta, radius, init_speed, elasticity) -> None:
pos = ti.Vector.field(2, ti.f32, shape=N)
vel = ti.Vector.field(2, ti.f32, shape=N)
for i in range(N):
pos[i] = ti.Vector([pos_x[i], pos_y[i]])
vel[i] = ti.Vector([ti.cos(theta[i]), ti.sin(theta[i])]) * init_speed
super().__init__(N, mass, pos, radius, elasticity, vel)
'''
True if all balls are in the given area(either left half or right half).
'''
def detect_success(self, left=True):
for i in range(self.n):
if left and self.pos[i][0] + self.radius > 0.5:
return False
if not left and self.pos[i][0] - self.radius < 0.5:
return False
return True
'''
The number of balls in the given area(either left half or right half).
'''
def success_ball_number(self, left=True):
num = 0
for i in range(self.n):
if left and self.pos[i][0] + self.radius < 0.5:
num += 1
if not left and self.pos[i][0] - self.radius > 0.5:
num += 1
return num
'''
Calculate kinetic energy. Help with testing, not used in game itself.
'''
@ti.kernel
def get_kinetic_energy(self) -> ti.f32:
k = 0.0
for i in range(self.n):
k += self.mass * self.vel[i].norm() ** 2
k /= 2.0
return k
'''
Calculate momentum x dir. Help with testing, not used in game itself.
'''
@ti.kernel
def get_momentum_x(self) -> ti.f32:
p = ti.Vector([0.0, 0.0])
for i in range(self.n):
p += self.mass * self.vel[i]
return p[0]
'''
Calculate momentum y dir. Help with testing, not used in game itself.
'''
@ti.kernel
def get_momentum_y(self) -> ti.f32:
p = ti.Vector([0.0, 0.0])
for i in range(self.n):
p += self.mass * self.vel[i]
return p[1]
'''
Walls for the Maxwell's demon game. Consists of the 4 boundaries and 2 movable walls in the middle.
'''
@ti.data_oriented
class Walls(AABBs):
def __init__(self, elasticity) -> None:
N, mass = 6, 0
topleft = ti.Vector.field(2, ti.f32, shape=N)
bottomright = ti.Vector.field(2, ti.f32, shape=N)
vel_walls = ti.Vector.field(2, ti.f32, shape=N)
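        # Boxes 0-3 are the outer boundaries (left, right, bottom, top); boxes 4 and 5 are the two
        # movable center walls, leaving a gap between y = 0.45 and y = 0.55 for balls to pass through.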
topleft[0], bottomright[0] = ti.Vector([0, 0]), ti.Vector([0.01, 1])
topleft[1], bottomright[1] = ti.Vector([0.99, 0]), ti.Vector([1, 1])
topleft[2], bottomright[2] = ti.Vector([0.01, 0]), ti.Vector([0.99, 0.01])
topleft[3], bottomright[3] = ti.Vector([0.01, 0.99]), ti.Vector([0.99, 1])
topleft[4], bottomright[4] = ti.Vector([0.495, 0.01]), ti.Vector([0.505, 0.45])
topleft[5], bottomright[5] = ti.Vector([0.495, 0.55]), ti.Vector([0.505, 0.99])
for i in range(N):
vel_walls[i] = ti.Vector([0, 0]) # all still, no velocity
super().__init__(N, mass, topleft, bottomright, elasticity, vel_walls)
'''
    center walls moved by the Maxwell's demon; direction is either up or down
'''
def walls_move(self, up=True, step=0.02):
if up:
if self.bottomright[4][1] > step:
self.bottomright[4][1] -= step
self.topleft[5][1] -= step
else:
if self.topleft[5][1] < 1 - step:
self.bottomright[4][1] += step
self.topleft[5][1] += step
'''
Generate a ball's center position within the given range, and a random direction.
'''
def generate_pos_dir(xmin, xmax, ymin, ymax, r):
pos_x = (xmin + r) + random() * (xmax - xmin - 2 * r)
pos_y = (ymin + r) + random() * (ymax - ymin - 2 * r)
theta = random() * 2 * math.pi
return pos_x, pos_y, theta
'''
Generate center positions and random directions for a given number of balls in the game.
'''
def generate_pos_dir_all(n, r):
pos_x, pos_y, theta = [None]*n, [None]*n, [None]*n
for i in range(n):
sink = True
while(sink):
if (random() < 0.5):
pos_x[i], pos_y[i], theta[i] = generate_pos_dir(0.01, 0.495, 0.01, 0.99, r)
else:
pos_x[i], pos_y[i], theta[i] = generate_pos_dir(0.505, 0.99, 0.01, 0.99, r)
sink = False
            for j in range(i):
                # two balls of radius r overlap when their centers are closer than 2 * r
                if (pos_x[i] - pos_x[j]) ** 2 + (pos_y[i] - pos_y[j]) ** 2 < (2 * r) ** 2:
sink = True
return pos_x, pos_y, theta
```
#### File: 507C/Maxwell-Demon-Game/Rigid2dBody.py
```python
import taichi as ti
from random import random
from random import seed
__author__ = "507C"
__credits__ = ["507C", "Y7K4"]
__version__ = "1.0.1"
__maintainer__ = "507C"
'''
Rigid bodies. Non-deformable, no squashing or stretching allowed.
'''
@ti.data_oriented
class Rigid2dBodies:
def __init__(self, N, mass, elasticity, vel_init) -> None:
# 2d rigid body object-related field
self.n = N
self.mass = mass # set mass = 0 for mass = inf
self.elasticity = elasticity
self.vel = vel_init
self.d_vel = ti.Vector.field(2, ti.f32, shape=self.n)
self.inv_mass = mass # use inv_mass to save cal time in every itr and deal with inf mass case
if self.mass == 0:
self.inv_mass = 0
else:
self.inv_mass = 1/self.mass
def display(self, gui, color=0xffffff):
pass
@ti.func
def clear_d_vel(self):
for i in self.d_vel:
self.d_vel[i] = ti.Vector([0.0, 0.0])
@ti.kernel
def update(self, h: ti.f32):
pass
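    '''
    Impulse for a frictionless collision with restitution e = min(elasticity1, elasticity2):
    j = -(1 + e) * (v_rel . n) / (inv_mass1 + inv_mass2), returned as the vector j * n
    (zero when the bodies are already separating). Inverse masses are used so that an
    immovable body can be modelled with inv_mass = 0.
    '''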
@staticmethod
@ti.func
def get_impulse(rv, normal, elasticity1, elasticity2, inv_mass1, inv_mass2):
# relative velocity in terms of the normal direction
vel_along_normal = rv[0] * normal[0] + rv[1] * normal[1]
# Do not resolve if velocities are separating(impulse = 0)
impulse = ti.Vector([0.0, 0.0])
# If not separating, calculate impulse
if vel_along_normal < 0:
e = min(elasticity1, elasticity2)
j = -(1 + e) * vel_along_normal
j /= inv_mass1 + inv_mass2
impulse = j * normal
return impulse
'''
Circles, each circle represented by a center pos and a radius
'''
@ti.data_oriented
class Circles(Rigid2dBodies):
def __init__(self, N, mass, pos, radius, elasticity, vel_init) -> None:
super().__init__(N, mass, elasticity, vel_init)
self.pos = pos # pos of center
self.radius = radius
def display(self, gui, pic_size=600, color=0xffffff):
gui.circles(self.pos.to_numpy(), radius=self.radius * pic_size, color=color)
'''
resolve collision between bodies inside Circle object
'''
@ti.kernel
def compute_impulse_vs_self(self):
for i in range(self.n):
for j in range(self.n):
# resolve collision. Update both for time saving.
if i < j:
diff = self.pos[j] - self.pos[i]
if diff.norm() < 2 * self.radius:
# relative velocity
rv = self.vel[j] - self.vel[i]
# normal relative position
normal = diff / diff.norm()
# get impulse
impulse = Rigid2dBodies.get_impulse(rv, normal, self.elasticity,
self.elasticity, self.inv_mass, self.inv_mass)
# update both i and j
self.d_vel[i] -= self.inv_mass * impulse
self.d_vel[j] += self.inv_mass * impulse
'''
resolve collision between Circle object and another Circle object
'''
@ti.kernel
def compute_impulse_vs_circle(self, circles: ti.template()):
for i in range(self.n):
for j in range(circles.n):
# resolve collision
diff = circles.pos[j] - self.pos[i]
if diff.norm() < self.radius + circles.radius:
# relative velocity
rv = circles.vel[j] - self.vel[i]
# normal relative position
normal = diff / diff.norm()
# get impulse
impulse = Rigid2dBodies.get_impulse(rv, normal, self.elasticity,
circles.elasticity, self.inv_mass, circles.inv_mass)
# update d_vel[i]
self.d_vel[i] -= self.inv_mass * impulse
'''
resolve collision between Circle object and AABB object
'''
@ti.kernel
def compute_impulse_vs_AABB(self, aabbs: ti.template()):
for i in range(self.n):
for j in range(aabbs.n):
                # find the closest point on the box to the circle center by clamping the
                # center-to-box vector to the box half-extents; the collision normal points
                # from that closest point towards the circle center
pos_j = (aabbs.topleft[j] + aabbs.bottomright[j])/2
n = self.pos[i] - pos_j
closest = n
x_extent = abs(aabbs.topleft[j][0] - aabbs.bottomright[j][0]) / 2
y_extent = abs(aabbs.topleft[j][1] - aabbs.bottomright[j][1]) / 2
closest[0] = max(min(closest[0], x_extent), -x_extent)
closest[1] = max(min(closest[1], y_extent), -y_extent)
normal = n - closest
# resolve collision
if normal.norm() < self.radius:
# relative velocity
rv = - aabbs.vel[j] + self.vel[i]
# normal relative position
normal /= normal.norm()
# get impulse
impulse = Rigid2dBodies.get_impulse(rv, normal, self.elasticity,
aabbs.elasticity, self.inv_mass, aabbs.inv_mass)
# update d_vel[i]
self.d_vel[i] += self.inv_mass * impulse
@ti.kernel
def update(self, h: ti.f32):
# update vel and pos
for i in self.vel:
self.vel[i] += self.d_vel[i]
self.pos[i] += h * self.vel[i]
# clear d_vel for the next itr
self.clear_d_vel()
'''
Axis Aligned Bounding Boxes, each box represent by the topleft point and the bottomright point
'''
@ti.data_oriented
class AABBs(Rigid2dBodies):
def __init__(self, N, mass, topleft, bottomright, elasticity, vel_init) -> None:
super().__init__(N, mass, elasticity, vel_init)
self.topleft = topleft
self.bottomright = bottomright
def display(self, gui, color=0xffffff):
for i in range(self.n):
gui.rect(self.topleft[i], self.bottomright[i], radius=1, color=color)
@ti.kernel
def update(self, h: ti.f32):
# update vel and pos
for i in self.vel:
self.vel[i] += self.d_vel[i]
self.topleft[i] += h * self.vel[i]
self.bottomright[i] += h * self.vel[i]
# clear d_vel for the next itr
self.clear_d_vel()
``` |
{
"source": "509854708/genshin_task-resin-expedition_alert",
"score": 2
} |
#### File: alert/notifiers/discord.py
```python
from .basenotifier import BaseNotifier as Base
from ..config import config
class Discord(Base):
def __init__(self):
self.name = 'Discord'
self.token = config.DISCORD_WEBHOOK
self.retcode_value = 204
def send(self, text, status, desp):
url = config.DISCORD_WEBHOOK
data = {
'embeds': [{
'title': f'{text}{status}',
'description': desp,
'color': "15553898"
}]
}
return self.push('post', url, json=data)
```
#### File: alert/notifiers/pushdeer.py
```python
from .basenotifier import BaseNotifier as Base
from ..config import config
class Pushdeer(Base):
def __init__(self):
self.name = 'Pushdeer'
self.token = config.PUSHDEER_KEY
self.retcode_key = 'code'
self.retcode_value = 0
def send(self, text, status, desp):
url = 'https://api2.pushdeer.com/message/push'
data = {
'pushkey': config.PUSHDEER_KEY,
'text': f'{text} {status}',
'desp': desp,
'type': 'markdown'
}
return self.push('post', url, data=data)
``` |
{
"source": "50kawa/StackGAN-v2",
"score": 2
} |
#### File: code/auto-encoder-pokemon/model.py
```python
import random
import torch
import torch.nn as nn
import torch.nn.parallel
from miscc.config import cfg
from torch.autograd import Variable
import torch.nn.functional as F
from torchvision import models
import torch.utils.model_zoo as model_zoo
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.hid_dim = cfg.CHARLSTM.DIMENSION
self.n_layers = 1
self.word_embeds = nn.Embedding(cfg.CHAR.VOCABSIZE, cfg.CHARVEC.DIMENSION)
self.pokemon_embed = nn.Linear(cfg.POKEMON.SIZE, cfg.POKEMON.DIMENSION)
self.rnn = nn.LSTM(cfg.CHARVEC.DIMENSION + cfg.POKEMON.DIMENSION, cfg.CHARLSTM.DIMENSION, 1, dropout=cfg.CHARLSTM.DROPOUT)
self.dropout = nn.Dropout(cfg.CHARLSTM.DROPOUT)
def forward(self, sentence, embedding):
embeds = self.word_embeds(Variable(sentence))
concated_emb = torch.cat((embeds, self.pokemon_embed(embedding).repeat(len(sentence), 1, 1)), dim=2)
lstm_out, (hn, cn) = self.rnn(concated_emb)
return hn, cn
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.output_dim = cfg.CHAR.VOCABSIZE
self.hid_dim = cfg.CHARLSTM.DIMENSION
self.n_layers = 1
self.word_embeds = nn.Embedding(cfg.CHAR.VOCABSIZE + 1, cfg.CHARVEC.DIMENSION)
self.pokemon_embed = nn.Linear(cfg.POKEMON.SIZE, cfg.POKEMON.DIMENSION)
self.rnn = nn.LSTM(cfg.CHARVEC.DIMENSION + cfg.POKEMON.DIMENSION, cfg.CHARLSTM.DIMENSION, 1, dropout=cfg.CHARLSTM.DROPOUT)
self.fc_out = nn.Linear(cfg.CHARLSTM.DIMENSION, cfg.CHAR.VOCABSIZE + cfg.POKEMON.SIZE)
self.dropout = nn.Dropout(cfg.CHARLSTM.DROPOUT)
def forward(self, sentence, embedding, hidden, cell):
sentence = sentence.unsqueeze(0)
embeds = self.word_embeds(Variable(sentence))
concated_emb = torch.cat((embeds, self.pokemon_embed(embedding).unsqueeze(0)), dim=2)
output, (hidden, cell) = self.rnn(concated_emb, (hidden, cell))
prediction = self.fc_out(output.squeeze(0))
output_word = prediction[:, :cfg.CHAR.VOCABSIZE]
output_emb = prediction[:, cfg.CHAR.VOCABSIZE:]
return output_word, output_emb, hidden, cell
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder, device):
super(Seq2Seq, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.device = device
assert encoder.module.hid_dim == decoder.module.hid_dim, \
"Hidden dimensions of encoder and decoder must be equal!"
assert encoder.module.n_layers == decoder.module.n_layers, \
"Encoder and decoder must have equal number of layers!"
def forward(self, pokemon_words, pokemon_emb, teacher_forcing_ratio=0.5):
# src = [src len, batch size]
# src = [src len, batch size]
# teacher_forcing_ratio is probability to use teacher forcing
# e.g. if teacher_forcing_ratio is 0.75 we use ground-truth inputs 75% of the time
batch_size = pokemon_words.shape[1]
src_len = pokemon_words.shape[0]
src_vocab_size = self.decoder.module.output_dim
# tensor to store decoder outputs
outputs = torch.zeros(src_len, batch_size, src_vocab_size).to(self.device)
# last hidden state of the encoder is used as the initial hidden state of the decoder
hidden, cell = self.encoder(pokemon_words, pokemon_emb)
# first input to the decoder is the <sos> tokens
input = torch.tensor([cfg.CHAR.VOCABSIZE]).repeat(cfg.TRAIN.BATCH_SIZE)
input_pokemon_emb = torch.zeros(cfg.TRAIN.BATCH_SIZE, cfg.POKEMON.SIZE)
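        # cfg.CHAR.VOCABSIZE indexes the extra <sos> row of the decoder embedding (which has
        # VOCABSIZE + 1 entries); the pokemon embedding is started from an all-zero vector.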
for t in range(src_len):
# insert input token embedding, previous hidden and previous cell states
# receive output tensor (predictions) and new hidden and cell states
output_word, output_emb, hidden, cell = self.decoder(input, input_pokemon_emb, hidden, cell)
# place predictions in a tensor holding predictions for each token
outputs[t] = output_word
# decide if we are going to use teacher forcing or not
teacher_force = random.random() < teacher_forcing_ratio
# get the highest predicted token from our predictions
top1 = torch.argmax(output_word, dim=1)
# if teacher forcing, use actual next token as next input
# if not, use predicted token
if teacher_force:
input = pokemon_words[t]
input_pokemon_emb = pokemon_emb
else:
input = top1
input_pokemon_emb = output_emb
return outputs, output_emb
``` |
{
"source": "50mkw/mysite",
"score": 2
} |
#### File: mysite/rec/__init__.py
```python
from rec.decorators import register
from rec.filters import (
AllValuesFieldListFilter, BooleanFieldListFilter, ChoicesFieldListFilter,
DateFieldListFilter, FieldListFilter, ListFilter, RelatedFieldListFilter,
RelatedOnlyFieldListFilter, SimpleListFilter,
)
from rec.helpers import ACTION_CHECKBOX_NAME
from rec.options import (
HORIZONTAL, VERTICAL, ModelRec, StackedInline, TabularInline,
)
from rec.sites import RecSite, site
from django.utils.module_loading import autodiscover_modules
__all__ = [
"register", "ACTION_CHECKBOX_NAME", "ModelRec", "HORIZONTAL", "VERTICAL",
"StackedInline", "TabularInline", "ListFilter",
"SimpleListFilter", "FieldListFilter", "BooleanFieldListFilter",
"RelatedFieldListFilter", "ChoicesFieldListFilter", "DateFieldListFilter",
"AllValuesFieldListFilter", "RelatedOnlyFieldListFilter",
]
def autodiscover():
autodiscover_modules('rec', register_to=site)
default_app_config = 'rec.apps.RecConfig'
``` |
{
"source": "50m-regent/CGKit",
"score": 3
} |
#### File: cgkit/scaffolds/card.py
```python
class Card:
def __init__(self, data=None):
        if data is None:
self.id = 0
self.name = 'unnamed'
self.status = {}
else:
self.id = data.id
self.name = data.name
self.status = data.status
```
#### File: cgkit/scaffolds/game.py
```python
from random import randint
class Game:
def __init__(self, players=[]):
self.players = players
def init(self):
        self.turn = randint(0, len(self.players) - 1)
``` |
{
"source": "50shashwat/factik",
"score": 3
} |
#### File: factik/flask/RestApi.py
```python
import flask
import sys
import pickle
import joblib
import pandas as pd
from sklearn.utils import shuffle
import random
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
import string
import numpy as np
import re
import nltk
import pickle
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import TfidfVectorizer
from flask import request, jsonify
from sklearn.metrics import log_loss
app = flask.Flask(__name__)
app.config["DEBUG"] = True
@app.route('/getPrediction/', methods=['GET'])
def home():
print("Output from Python")
fname = 'pickle_file_name.pkl'
model = joblib.load(open(fname, 'rb'))
infil = open("feature.pkl",'rb')
new_bow = pickle.load(infil)
infil.close()
if 'inputtext' in request.args:
id = request.args['inputtext']
else:
return "Error: No id field provided. Please specify an input text."
input = [id]
aah = new_bow.transform(input).toarray()
var = model.predict(aah)
probs = model.predict_proba(aah)
x = probs[:,0]
y = probs[:,1]
x = x.tolist()
y = y.tolist()
books = [
{'id': 'Fake Percentage',
'Percentage': x },
{'id': 'True Percentage',
'Percentage': y}
]
return jsonify(books)
app.run(host='0.0.0.0')
``` |
{
"source": "50sven/carla_rllib",
"score": 3
} |
#### File: carla_rllib/utils/seeding.py
```python
import random
import os
import numpy as np
def set_seed(i):
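    # Derive a per-process seed (offset by the MPI rank when MPI is available) and apply it to
    # every random number generator in use: tensorflow, torch (CPU and CUDA), numpy and random.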
try:
import MPI
rank = MPI.COMM_WORLD.Get_rank()
except ImportError:
rank = 0
myseed = i + 1000 * rank if i is not None else None
try:
import tensorflow as tf
tf.set_random_seed(myseed)
except ImportError:
pass
try:
import torch
torch.manual_seed(myseed)
torch.cuda.manual_seed_all(myseed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
except ImportError:
pass
np.random.seed(myseed)
random.seed(myseed)
    os.environ['PYTHONHASHSEED'] = str(myseed)
``` |
{
"source": "50sven/Exploring-Historical-Text-Corpora-Using-Word-and-Document-Embeddings",
"score": 2
} |
#### File: Exploring-Historical-Text-Corpora-Using-Word-and-Document-Embeddings/dashboard/run_app.py
```python
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import pickle
import plotly.graph_objs as go
from geopy import Nominatim
from Doc2Vec_Evaluation import get_most_similar_tokens
from app import app
from components import Header, Table, Scatter, BarOverview, Map, Dropdown, BarSpecific, DocSpecific, Author
geolocator = Nominatim(user_agent="geolocator")
# Overview
overview_tsne = pickle.load(open("./assets/data_overview_tsne.pkl", "rb"))
overview_persons = pickle.load(open("./assets/data_overview_persons_by_tag.pkl", "rb"))
overview_places = pickle.load(open("./assets/data_overview_places_by_tag.pkl", "rb"))
# Book
author_data = pd.read_csv("./assets/data_author_information.csv", delimiter="|")
id_mapping = pickle.load(open("./assets/data_id_mapping.pkl", "rb"))
specific_entities = pickle.load(open("./assets/data_specific_entities_by_tag.pkl", "rb"))
doc_similarities = pickle.load(open("./assets/data_doc_similarities.pkl", "rb"))
# Word
vocabulary = list(id_mapping.keys())[308:]
remaining_persons = pickle.load(open('./assets/data_remaining_persons.pkl', 'rb'))
remaining_places = pickle.load(open('./assets/data_remaining_places.pkl', 'rb'))
cos_sim_matrix = pd.read_pickle("./assets/data_cosine_similarity_matrix.pkl")
overview = html.Div(id="body1", children=[
Header("overview"),
html.Div(id="ColumnBlockOverview", children=[
Scatter(overview_tsne),
BarOverview(overview_persons),
html.Div(id="tableHeadline", children=[
html.H4(["Collection of Books"])
]),
Table(author_data[["Title", "Author", "Publishing Date"]]),
]),
html.Div(id="MapBlock", children=[
Map(overview_places)
])
])
book = html.Div(id="body1", children=[
Header("book"),
Dropdown("book", list(specific_entities.keys())),
html.Div(id="ColumnBlockBook", children=[
Author(),
html.Div(id="specTitBox", children=[
html.H1(id="specificTitle", children=[])
]),
html.Div(id="DocSimDiv", children=[
html.H1(id="DocSimHead", children=["Most similar Documents"]),
DocSpecific(),
]),
BarSpecific(),
]),
html.Div(id="MapBlock", children=[
Map(overview_places)
])
])
word = html.Div(id="body1", children=[
Header("word"),
Dropdown("word", vocabulary),
html.Div(id="ColumnBlockWord", children=[
html.Div(id="specTitBox", children=[
html.H1(id="specificTitle", children=[])
]),
html.Div(id="DocSimDiv", children=[
html.H1(id="DocSimHead", children=["Most similar Documents"]),
DocSpecific(),
]),
BarSpecific(),
]),
html.Div(id="MapBlock", children=[
Map(overview_places)
])
])
app.layout = html.Div([
dcc.Location(id='url', refresh=False),
html.Div(id='page-content')
])
@app.callback(Output('page-content', 'children'),
[Input('url', 'pathname')])
def display_page(pathname):
if pathname == '/' or \
pathname == '/dashboard/' or \
pathname == '/dashboard/overview' or \
pathname == '/dashboard/overview/':
return overview
if pathname == '/book' or \
pathname == '/dashboard/book' or \
pathname == '/dashboard/book/':
return book
if pathname == '/word' or \
pathname == '/dashboard/word' or \
pathname == '/dashboard/word/':
return word
else:
return "404 Page not found"
@app.callback(Output('specificTitle', 'children'),
[Input('dropdown', 'value')])
def update_specific_title(value):
if not value:
return ["--Select an instance from the dropdown menu--"]
title = [value]
return title
@app.callback(Output('AuthorBox', 'children'),
[Input('dropdown', 'value')])
def update_author_information(value):
data = author_data[author_data.Title == value]
image = data["author_image"].values[0]
if image == "-":
image = app.get_asset_url('profile_dummy.png')
author = data["Author"].values[0].upper()
origin = data["origin"].values[0]
date_birth = data["date_birth"].values[0]
birth_place = data["birth_place"].values[0]
date_death = data["date_death"].values[0]
occupation = data["occupation"].values[0]
pub_date = data["Publishing Date"].values[0]
link = data["author_wikidata_id"].values[0]
if link == "-":
link = "http://www.google.com"
return [html.Div(id="AuthorImage", children=[
html.Img(id="AImg", src=image)
]),
html.Div(id="AuthorData", children=[
html.H1("Author Information"),
html.P(f"Name: {author}"),
html.P(f"Origin: {origin}"),
html.P(f"Born: {date_birth}, {birth_place}"),
html.P(f"Date of death: {date_death}"),
html.P(f"Occupation: {occupation}"),
html.P(f"Publishing date of book: {pub_date}"),
html.Br(),
html.A("Link to Wikidata", href=link, target="_blank")
])
]
@app.callback(Output('PersChart', 'figure'),
[Input('dropdown', 'value'),
Input('url', 'pathname')])
def update_pers_chart(value, page):
if not value:
return
if "book" in page:
data = specific_entities[value]["persons"]
persons = [p.title() for p in data["names"]]
quant = data["frequency"]
title = "<b>Most common Persons</b>"
if "word" in page:
persons, quant = get_most_similar_tokens(value, cos_sim_matrix, kind="persons",
num=10, places=None, persons=remaining_persons)
title = "<b>Most similar Persons</b>"
figure = dict(
data=[go.Bar(
x=quant,
y=persons,
orientation='h',
marker={
'color': '#ff4058',
},
)],
layout=dict(
title=title,
font=dict(family='Soria, Times New Roman, Times, serif', color='#002C77', size=19),
margin=dict(l=10, r=10, t=50, b=30),
plot_bgcolor="rgba(0,0,0,0)",
paper_bgcolor="rgba(0,0,0,0)",
xaxis=dict(tick0=0, dtick=max(quant)),
yaxis=dict(ticks='outside',
showgrid=True,
showline=False,
showticklabels=False),
annotations=[dict(xref='paper', yref='y',
x=0, y=yd,
font=dict(
color="#000000",
size=19
),
text=str(yd),
showarrow=False) for xd, yd in zip(quant, persons)]
)
)
return figure
@app.callback(Output('DocSim', 'data'),
[Input('dropdown', 'value'),
Input('url', 'pathname')])
def update_doc_sim_table(value, page):
if not value:
return
if "book" in page:
data = doc_similarities[value]
books = data["books"]
similarities = data["similarities"]
if "word" in page:
books, similarities = get_most_similar_tokens(value, cos_sim_matrix, kind="docs",
num=10, places=None, persons=None)
books = books[::-1]
similarities = similarities[::-1]
data = [{"Book": books[i], "Similarity": str(round(similarities[i], 4))} for i in range(len(books))]
return data
@app.callback(Output('MapGraph', 'figure'),
[Input('dropdown', 'value'),
Input('url', 'pathname')])
def update_map(value, page):
if not value:
return
pl, quant, lon, lat = [], [], [], []
if "book" in page:
places = specific_entities[value]["places"]["names"][-10:]
frequency = specific_entities[value]["places"]["frequency"][-10:]
title = "<b>Most common Places</b>"
for idx, p in enumerate(places):
try:
location = geolocator.geocode(p)
lon += [location.longitude]
lat += [location.latitude]
pl += [f"{p.title()}<br>Frequency: {frequency[idx]}"]
quant += [frequency[idx]]
except:
pass
if "word" in page:
places, similarities = get_most_similar_tokens(value, cos_sim_matrix, kind="places",
num=10, places=remaining_places, persons=None)
title = "<b>Most similar Places</b>"
for idx, p in enumerate(places):
try:
location = geolocator.geocode(p)
lon += [location.longitude]
lat += [location.latitude]
pl += [f"{p.title()}<br>Similarity: {similarities[idx]}"]
quant += [similarities[idx]]
except:
pass
figure = dict(
data=[dict(
type='scattergeo',
lon=lon,
lat=lat,
text=pl,
hoverinfo='text',
marker=dict(
symbol='circle',
color="#B22234",
opacity=0.8,
size=quant,
sizemode='area',
sizeref=max(quant) / (5.**3),
sizemin=2,
line=dict(width=0)
)
)],
layout=dict(
title=title,
font=dict(family='Soria, Times New Roman, Times, serif', color='#B22234', size=19),
dragmode="pan",
geo=dict(
showocean=True,
oceancolor="rgba(0, 44, 119, 0.7)",
showland=True,
landcolor="#ededed",
lonaxis=dict(range=[min(lon) - 10, max(lon) + 10]), # [-125, 35]
lataxis=dict(range=[min(lat) - 10, max(lat) + 10]), # [10, 70]
showcountries=True,
countrywidth=0.5,
subunitwidth=0.5,
projection=dict(type="equirectangular"),
),
margin=dict(l=0, r=0, t=50, b=30),
hovermode="closest",
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
)
)
return figure
if __name__ == '__main__':
app.run_server(debug=True, host='0.0.0.0', port=1880)
```
#### File: Exploring-Historical-Text-Corpora-Using-Word-and-Document-Embeddings/Data_Handling/Link_wikidata.py
```python
import pandas as pd
import requests
from wikidata.client import Client
import time
import datetime
# Link each author of a book with its wikidata page
start = time.time()
def check_human(id):
url = 'https://query.wikidata.org/sparql'
query = 'PREFIX entity: <http://www.wikidata.org/entity/> ' \
'SELECT ?human ' \
'WHERE { entity:' + id + ' wdt:P31 ?human. ' \
'} '
response = requests.get(url, params={'format': 'json', 'query': query})
    data = response.json()
    human = False  # default, avoids an UnboundLocalError when no bindings are returned
    if data['results']['bindings']:
if 'Q5' in data['results']['bindings'][0]['human']['value']:
human = True
else:
human = False
return human
df = pd.read_csv('../data/1210_books_info.csv')
index = 0
url = 'https://query.wikidata.org/sparql'
no_identifier = 1
no_author_wiki_id = set()
item_counter = 0
country_counter = 0
birth_counter = 0
death_counter = 0
birthplace_counter = 0
gender_counter = 0
occupation_counter = 0
image_counter = 0
for author in df['Author']:
# add wikidata identifier
if ';' in author:
author = author.split(';')[0]
# https://query.wikidata.org/#SELECT%20distinct%20%3FitemLabel%20%3FitemDescription%20%3Fcountry%20%0A%3Fdate_birth%20%3Fdate_death%20%3Fbirth_place%20%3Fgender%20%3Foccupation%0AWHERE%20%0A%7B%0A%20%20%3FitemLabel%20%3Flabel%20%27John%20G.%20B.%20Adams%27%40en.%0A%20%20%3FitemLabel%20wdt%3AP31%20wd%3AQ5%20.%0A%20%20OPTIONAL%20%7B%20%3FitemLabel%20wdt%3AP21%20%3Fgender%20.%7D%0A%20%20OPTIONAL%20%7B%20%3FitemLabel%20wdt%3AP27%20%3Fcountry%20.%7D%0A%20%20OPTIONAL%20%7B%20%3FitemLabel%20wdt%3AP569%20%3Fdate_birth%20.%7D%0A%20%20OPTIONAL%20%7B%20%3FitemLabel%20wdt%3AP570%20%3Fdate_death%20.%7D%0A%20%20OPTIONAL%20%7B%20%3FitemLabel%20wdt%3AP19%20%3Fbirth_place%20.%7D%0A%20%20OPTIONAL%20%7B%20%3FitemLabel%20wdt%3AP106%20%3Foccupation%20.%7D%0A%20%20SERVICE%20wikibase%3Alabel%20%7B%0A%09%09bd%3AserviceParam%20wikibase%3Alanguage%20%22en%22%20.%0A%09%7D%0A%20%20%7D
# https://query.wikidata.org/#SELECT%20distinct%20%3Fitem%20%3FitemLabel%20%3FitemDescription%20%3FcountryLabel%20%0A%3Fdate_birth%20%3Fdate_death%20%3Fbirth_placeLabel%20%3FgenderLabel%20%3FoccupationLabel%20%3Fimage%0AWHERE%20%0A%7B%0A%20%20%3Fitem%20%3Flabel%20%27Elizabeth%20Cary%20Agassiz%27%40en.%0A%20%20%3Fitem%20wdt%3AP31%20wd%3AQ5%20.%0A%20%20OPTIONAL%20%7B%20%3Fitem%20wdt%3AP21%20%3Fgender%20.%7D%0A%20%20OPTIONAL%20%7B%20%3Fitem%20wdt%3AP27%20%3Fcountry%20.%7D%0A%20%20OPTIONAL%20%7B%20%3Fitem%20wdt%3AP569%20%3Fdate_birth%20.%7D%0A%20%20OPTIONAL%20%7B%20%3Fitem%20wdt%3AP570%20%3Fdate_death%20.%7D%0A%20%20OPTIONAL%20%7B%20%3Fitem%20wdt%3AP19%20%3Fbirth_place%20.%7D%0A%20%20OPTIONAL%20%7B%20%3Fitem%20wdt%3AP106%20%3Foccupation%20.%7D%0A%20%20OPTIONAL%20%7B%20%3Fitem%20wdt%3AP18%20%3Fimage%20.%7D%0A%20%20SERVICE%20wikibase%3Alabel%20%7B%0A%09%09bd%3AserviceParam%20wikibase%3Alanguage%20%22en%22%20.%0A%09%7D%0A%20%20%7D
sparql_query = 'SELECT distinct ?item ?itemLabel ?itemDescription ?countryLabel ?date_birth ?date_death ' \
'?birth_placeLabel ?genderLabel ?occupationLabel ?image ' \
'WHERE { ' \
'?item ?label \'' + author + '\'@en. ' \
'?item wdt:P31 wd:Q5. ' \
'OPTIONAL { ?item wdt:P21 ?gender .}' \
'OPTIONAL { ?item wdt:P27 ?country .} ' \
'OPTIONAL { ?item wdt:P569 ?date_birth .} ' \
'OPTIONAL { ?item wdt:P570 ?date_death .} ' \
'OPTIONAL { ?item wdt:P19 ?birth_place .} ' \
'OPTIONAL { ?item wdt:P106 ?occupation .}' \
'OPTIONAL { ?item wdt:P18 ?image .}' \
'SERVICE wikibase:label { ' \
'bd:serviceParam wikibase:language "en". ' \
'}' \
'}'
r = requests.get(url, params={'format': 'json', 'query': sparql_query})
response = r.json()
if response['results']['bindings']:
occupation = ""
for key in response['results']['bindings'][0]:
if key == 'item':
wikiid_binding0 = response['results']['bindings'][0]['item']['value']
df.loc[index, 'author_wikidata_id'] = response['results']['bindings'][0]['item']['value']
item_counter += 1
elif key == 'itemLabel':
df.loc[index, 'author'] = response['results']['bindings'][0]['itemLabel']['value']
elif key == 'itemDescription':
df.loc[index, 'author_description'] = response['results']['bindings'][0]['itemDescription']['value']
elif key == 'genderLabel':
df.loc[index, 'gender'] = response['results']['bindings'][0]['genderLabel']['value']
gender_counter += 1
elif key == 'countryLabel':
df.loc[index, 'origin'] = response['results']['bindings'][0]['countryLabel']['value']
country_counter += 1
elif key == 'date_birth': # todo how are only years handled?
birth_dt = datetime.datetime.strptime(response['results']['bindings'][0]['date_birth']['value'], '%Y-%m-%dT%H:%M:%SZ')
df.loc[index, 'date_birth'] = f'{birth_dt.day}/{birth_dt.month}/{birth_dt.year}'
birth_counter += 1
elif key == 'birth_placeLabel':
df.loc[index, 'birth_place'] = response['results']['bindings'][0]['birth_placeLabel']['value']
birthplace_counter += 1
elif key == 'date_death':
death_dt = datetime.datetime.strptime(response['results']['bindings'][0]['date_death']['value'],
'%Y-%m-%dT%H:%M:%SZ')
df.loc[index, 'date_death'] = f'{death_dt.day}/{death_dt.month}/{death_dt.year}'
death_counter += 1
elif key == 'occupationLabel':
df.loc[index, 'occupation'] = response['results']['bindings'][0]['occupationLabel']['value']
occupation_counter += 1
elif key == 'image':
df.loc[index, 'author_image'] = response['results']['bindings'][0]['image']['value']
image_counter += 1
len_bindings = len(response['results']['bindings'])
counter = 0
if len_bindings > 1:
occupation = ""
for occup_result in response['results']['bindings']:
if counter == len_bindings:
break
if 'occupationLabel' in response['results']['bindings'][counter].keys() and response['results']['bindings'][counter]['item']['value'] == wikiid_binding0:
                    if occupation:  # todo: store occupations as a list rather than a concatenated string
occupation += ', ' + response['results']['bindings'][counter]['occupationLabel']['value']
else:
occupation += response['results']['bindings'][counter]['occupationLabel']['value']
counter += 1
df.loc[index, 'occupation'] = str(occupation)
index += 1
print(f'--- Processed author: {author} ; Index: {index}')
print(f'{item_counter / index * 100} % items were found on wikidata.')
    print(f'{gender_counter / index * 100} % gender information was found on wikidata.')
    print(f'{country_counter / index * 100} % origin information was found on wikidata.')
    print(f'{birth_counter / index * 100} % birth information was found on wikidata.')
    print(f'{birthplace_counter / index * 100} % birthplace information was found on wikidata.')
    print(f'{death_counter / index * 100} % death information was found on wikidata.')
    print(f'{occupation_counter / index * 100} % occupation information was found on wikidata.')
print(f'{image_counter / index * 100} % images were found on wikidata.')
time.sleep(33)
df.to_csv('../data/' + str(datetime.datetime.now().month) + str(datetime.datetime.now().day) + '_books_info_wiki.csv',
index=False, sep='|')
print(f'Time Tracking: in total it took {time.time()-start}')
``` |
{
"source": "50sven/ros_rllib",
"score": 2
} |
#### File: src/ros_carla_rllib/memories.py
```python
import torch
from collections import deque
class PPOBuffer(object):
"""
Replay Buffer to save samples for PPO
and diagnostic training data
"""
def __init__(self, batch_size, norm_adv=True):
# Samples
self.obs = [[], [], []]
self.actions = []
self.logps = []
self.values = []
self.returns = []
self.advantages = []
# Diagnostics
self.episode_rewards = []
self.episode_lengths = []
self.norm_adv = norm_adv
self.batch_size = batch_size
self.buffer_size = 0
def append(self, obs_t, action_t, logp_t, value_t, return_t, advantage_t):
"""Adds a sample to the buffer"""
self.obs[0].append(obs_t[0])
self.obs[1].append(obs_t[1])
self.obs[2].append(obs_t[2])
self.actions.append(action_t)
self.logps.append(logp_t)
self.values.append(value_t)
self.returns.append(return_t)
self.advantages.append(advantage_t)
self.buffer_size += 1
def eject(self):
"""Prepares and returns the collected batch"""
# Convert batch to tensors
(obs, actions, logps, values, returns, advantages) = self.batch_to_tensor()
# Normalize advantages
if self.norm_adv:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)
return obs, actions, logps, values, returns, advantages
def batch_to_tensor(self):
"""Transforms batch to torch.Tensors"""
# Convert arrays/vectors to torch.Tensors
xV = torch.Tensor(self.obs[0]).float()
xE = torch.Tensor(self.obs[1]).float()
xO = torch.Tensor(self.obs[2]).float()
# For LSTM
# xO = [torch.Tensor(o).float() for o in self.obs[2]]
obs = [xV, xE, xO]
actions = torch.Tensor(self.actions).float()
logps = torch.Tensor(self.logps).float()
values = torch.Tensor(self.values).float()
returns = torch.Tensor(self.returns).float()
advantages = torch.Tensor(self.advantages).float()
return obs, actions, logps, values, returns, advantages
def flush(self):
"""Clears the buffer"""
self.obs = [[], [], []]
self.actions = []
self.logps = []
self.values = []
self.returns = []
self.advantages = []
self.episode_rewards = []
self.episode_lengths = []
self.buffer_size = 0
def __len__(self):
"""Returns the current batch size"""
return self.buffer_size
class PPOBuffer2(object):
"""
Replay Buffer to save samples for PPO
and diagnostic training data
"""
def __init__(self, batch_size, norm_adv=False):
# Samples
self.obs = [deque(maxlen=batch_size),
deque(maxlen=batch_size),
deque(maxlen=batch_size)]
self.actions = deque(maxlen=batch_size)
self.logps = deque(maxlen=batch_size)
self.values = deque(maxlen=batch_size)
self.returns = deque(maxlen=batch_size)
self.advantages = deque(maxlen=batch_size)
# Diagnostics
self.episode_rewards = deque(maxlen=batch_size)
self.episode_lengths = deque(maxlen=batch_size)
self.norm_adv = norm_adv
self.batch_size = batch_size
self.buffer_size = 0
def append(self, obs_t, action_t, logp_t, value_t, return_t, advantage_t):
"""Adds a sample to the buffer"""
if self.buffer_size < self.batch_size:
self.buffer_size += 1
self.obs[0].append(obs_t[0])
self.obs[1].append(obs_t[1])
self.obs[2].append(obs_t[2])
self.actions.append(action_t)
self.logps.append(logp_t)
self.values.append(value_t)
self.returns.append(return_t)
self.advantages.append(advantage_t)
def eject(self):
"""Prepares and returns the collected batch"""
# Convert batch to tensors
(obs, actions, logps, values, returns, advantages) = self.batch_to_tensor()
# Normalize advantages
if self.norm_adv:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)
self.buffer_size = 0
return obs, actions, logps, values, returns, advantages
def batch_to_tensor(self):
"""Transforms batch to torch.Tensors"""
# Convert arrays/vectors to torch.Tensors
xV = torch.Tensor(self.obs[0]).float()
xE = torch.Tensor(self.obs[1]).float()
xO = torch.Tensor(self.obs[2]).float()
# For LSTM
# xO = [torch.Tensor(o).float() for o in self.obs[2]]
obs = [xV, xE, xO]
actions = torch.Tensor(self.actions).float()
logps = torch.Tensor(self.logps).float()
values = torch.Tensor(self.values).float()
returns = torch.Tensor(self.returns).float()
advantages = torch.Tensor(self.advantages).float()
return obs, actions, logps, values, returns, advantages
def __len__(self):
"""Returns the current batch size"""
return self.buffer_size
class A3CMemory(object):
"""
Memory to save n-steps
"""
def __init__(self):
self.log_probs = []
self.entropies = []
self.values = []
self.rewards = []
def store(self, log_prob, entropy, value, reward):
self.log_probs.append(log_prob)
self.entropies.append(entropy)
self.values.append(value)
self.rewards.append(reward)
def get_history(self):
return iter(zip(self.log_probs[::-1],
self.entropies[::-1],
self.values[::-1],
self.rewards[::-1]))
def clear(self):
self.log_probs = []
self.entropies = []
self.values = []
self.rewards = []
```
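A minimal usage sketch for the `PPOBuffer` above. The observation shapes, the batch size, and the random placeholder values are assumptions made for illustration (they are not taken from the training code); only the `append`/`eject`/`flush` API defined in this file is relied on.
```python
import numpy as np
from ros_carla_rllib.memories import PPOBuffer  # assumed import path based on the file location

buffer = PPOBuffer(batch_size=64, norm_adv=True)

for _ in range(64):
    # Hypothetical observation: an image, an ego-state vector, and an object matrix.
    obs_t = [np.random.rand(3, 84, 84), np.random.rand(18), np.random.rand(10, 4)]
    action_t = np.random.rand(2)          # e.g. steering and throttle (placeholder)
    logp_t, value_t = -1.2, 0.5           # placeholder policy outputs
    return_t, advantage_t = 1.0, 0.3      # placeholder GAE results
    buffer.append(obs_t, action_t, logp_t, value_t, return_t, advantage_t)

obs, actions, logps, values, returns, advantages = buffer.eject()
print(obs[0].shape, actions.shape, advantages.mean())  # tensors ready for a PPO update
buffer.flush()
```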
#### File: src/ros_carla_rllib/policies.py
```python
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import MultivariateNormal, Beta
class Model(nn.Module):
def __init__(self, out_size):
super(Model, self).__init__()
# Visual
self.conv1 = nn.Conv2d(3, 32, 8, 4)
self.conv2 = nn.Conv2d(32, 64, 4, 2)
self.conv3 = nn.Conv2d(64, 64, 3, 1)
self.fcV1 = nn.Linear(12288, 512)
# Numeric
self.fcN1 = nn.Linear(18, 128)
self.fcN2 = nn.Linear(128, 128)
# Combined
self.fcC1 = nn.Linear(512 + 128, 256)
# Action
self.fcOut = nn.Linear(256, out_size)
def forward(self, obs):
""" """
xV, xE, _ = obs[0], obs[1], obs[2]
# Visual
xV = F.relu(self.conv1(xV))
xV = F.relu(self.conv2(xV))
xV = F.relu(self.conv3(xV))
xV = torch.flatten(xV, 1)
xV = F.relu(self.fcV1(xV))
# Numeric
xE = F.relu(self.fcN1(xE))
xE = F.relu(self.fcN2(xE))
# Combined
xC = torch.cat([xE, xV], 1)
xC = F.relu(self.fcC1(xC))
# Output
out = self.fcOut(xC)
return out
class Actor(nn.Module):
def get_dist(self, obs):
raise NotImplementedError
def get_logp(self, pi, action):
raise NotImplementedError
def forward(self, obs, action=None):
pi = self.get_dist(obs)
logp = None
if action is not None:
logp = self.get_logp(pi, action)
return pi, logp
class BetaActor(Actor):
def __init__(self, model):
super(BetaActor, self).__init__()
self.model = globals()[model](out_size=4)
def get_dist(self, obs):
concentration = self.model(obs)
alpha = concentration[:, :2]
beta = concentration[:, 2:]
return Beta(alpha, beta)
def get_logp(self, pi, action):
return pi.log_prob(action).sum(1)
class GaussianActor(Actor):
def __init__(self, model):
super(GaussianActor, self).__init__()
self.model = globals()[model](out_size=2)
log_std = np.array([0.55, -0.35], dtype=np.float32)
self.log_std = torch.nn.Parameter(torch.as_tensor(log_std))
def get_dist(self, obs):
mu = self.model(obs)
std = torch.exp(self.log_std)
return MultivariateNormal(mu, scale_tril=torch.diag_embed(std))
def get_logp(self, pi, action):
return pi.log_prob(action)
class Critic(nn.Module):
def __init__(self, model):
super(Critic, self).__init__()
self.model = globals()[model](out_size=1)
def forward(self, obs):
""" """
return self.model(obs)
class ActorCritic(object):
def __init__(self, model, policy="gaussian", device="cpu"):
policy = policy.capitalize() + "Actor"
self.pi = globals()[policy](model).to(device)
self.value_fn = Critic(model).to(device)
def act(self, obs):
"""Returns (deterministic) action and state value"""
with torch.no_grad():
pi = self.pi.get_dist(obs)
value = self.value_fn(obs)
return pi.mean.numpy(), value.item()
def sample(self, obs):
"""Returns sampled action, log probability and state-value"""
with torch.no_grad():
pi = self.pi.get_dist(obs)
sample = pi.sample()
logp = self.pi.get_logp(pi, sample)
value = self.value_fn(obs)
return sample.numpy(), logp.item(), value.item()
```
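A sketch of driving the `ActorCritic` wrapper with dummy observations. The image size 3x128x160 is chosen so that the conv stack in `Model` flattens to the 12288 features `fcV1` expects; the third observation slot is unused by `Model.forward`, so a dummy tensor is passed. The import path is an assumption.
```python
import torch
from ros_carla_rllib.policies import ActorCritic  # assumed import path

ac = ActorCritic(model="Model", policy="gaussian", device="cpu")

# Dummy batch of one observation; a 3x128x160 image flattens to 12288 after conv1-conv3.
xV = torch.zeros(1, 3, 128, 160)   # visual input
xE = torch.zeros(1, 18)            # numeric input expected by fcN1
xO = torch.zeros(1, 1)             # unused by Model.forward, kept for the 3-element obs list
obs = [xV, xE, xO]

action, logp, value = ac.sample(obs)   # stochastic action for rollouts
det_action, det_value = ac.act(obs)    # mean action for evaluation
print(action, logp, value)
```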
#### File: src/ros_carla_rllib/utils.py
```python
import torch
import numpy as np
class RunningStatistics(object):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
def __init__(self, epsilon=1e-4):
self.Mean_ = 0.0
self.Var_ = 1.0
self.count = epsilon
def mean(self):
return self.Mean_
def var(self):
return self.Var_
def std(self):
return np.sqrt(self.Var_)
def update(self, x):
""" """
if isinstance(x, torch.Tensor):
batch_mean = torch.mean(x, axis=0).item()
batch_var = torch.var(x, axis=0).item()
else:
batch_mean = np.mean(x, axis=0)
batch_var = np.var(x, axis=0)
batch_count = x.shape[0]
self.Mean_, self.Var_, self.count = self.update_from_moments(self.Mean_, self.Var_, self.count,
batch_mean, batch_var, batch_count)
def update_from_moments(self, mean, var, count, batch_mean, batch_var, batch_count):
""" """
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = mean + delta * batch_count / tot_count
m_a = var * count
m_b = batch_var * batch_count
M2 = m_a + m_b + delta**2 * count * batch_count / tot_count
new_var = M2 / tot_count
new_count = tot_count
return new_mean, new_var, new_count
if __name__ == "__main__":
# Testing RunningStatistics()
x = [torch.randn(64), torch.randn(128), torch.randn(256)]
x_cat = torch.cat(x, axis=0)
rms = RunningStatistics()
for x_mb in x:
rms.update(x_mb)
ms1 = [x_cat.mean(0), x_cat.var(0), x_cat.std(0)]
ms2 = [rms.mean(), rms.var(), rms.std()]
print(ms1)
print(ms2)
print("-"*20)
x = [np.random.randn(64), np.random.randn(128), np.random.randn(256)]
x_cat = np.concatenate(x, axis=0)
rms = RunningStatistics()
for x_mb in x:
rms.update(x_mb)
ms1 = [x_cat.mean(0), x_cat.var(0), x_cat.std(0)]
ms2 = [rms.mean(), rms.var(), rms.std()]
print(ms1)
print(ms2)
``` |
{
"source": "510908220/django-backend-template",
"score": 2
} |
#### File: src/app/views.py
```python
from rest_framework import viewsets, authentication, permissions, filters
from rest_framework.pagination import PageNumberPagination
from .models import Sprint, Task
from .serializers import SprintSerializer, UserSerializer, TaskSerializer
from django.contrib.auth import get_user_model
from rest_framework.response import Response
User = get_user_model()
class StandardResultsSetPagination(PageNumberPagination):
page_size = 25
page_size_query_param = 'page_size'
max_page_size = 100
# Create your views here.
class DefaultsMixin(object):
authentication_classes = (
authentication.BasicAuthentication,
authentication.TokenAuthentication
)
permission_classes = (
permissions.IsAuthenticated,
)
pagination_class = StandardResultsSetPagination
filter_backends = (
filters.DjangoFilterBackend,
filters.SearchFilter,
filters.OrderingFilter
)
class SprintViewSet(DefaultsMixin, viewsets.ModelViewSet):
queryset = Sprint.objects.order_by('end')
serializer_class = SprintSerializer
search_fields = ('name',)
ordering_fields = ('end','name')
filter_fields = ('name', )
    # Plain field matching is enough here; for advanced matching such as date ranges, define a custom FilterSet class
# http://www.django-rest-framework.org/api-guide/filtering/#djangofilterbackend
class TaskViewSet(DefaultsMixin, viewsets.ModelViewSet):
queryset = Task.objects.all()
serializer_class = TaskSerializer
search_fields = ('name','description')
ordering_fields = ('name','order','started', 'due','completed')
filter_fields = ('assigned', )
    # filtering on 'assigned' matches users by pk
class UserViewSet(DefaultsMixin, viewsets.ReadOnlyModelViewSet):
lookup_field = User.USERNAME_FIELD
lookup_url_kwarg = User.USERNAME_FIELD
queryset = User.objects.order_by(User.USERNAME_FIELD)
serializer_class = UserSerializer
search_fields = (User.USERNAME_FIELD,)
class UserDefineViewSet(viewsets.ViewSet):
"""
有时需要抽象一些资源, 并没有对应的Model,Serializer. 这时需要手动实现ViewSet一些方法以达到类似于ModelViewSet的效果 .
"""
def list(self, request):
# queryset = User.objects.all()
# serializer = UserSerializer(queryset, many=True)
return Response([])
def retrieve(self, request, pk=None):
# queryset = User.objects.all()
# user = get_object_or_404(queryset, pk=pk)
# serializer = UserSerializer(user)
return Response({})
``` |
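These ViewSets are typically exposed through a DRF router; a sketch of the URL wiring follows. The URL prefixes and the import path are assumptions, and `UserDefineViewSet` needs an explicit base name because it has no queryset (`base_name` here, renamed `basename` in newer DRF releases).
```python
# urls.py (illustrative wiring; prefixes and import path are assumptions)
from rest_framework.routers import DefaultRouter
from app.views import SprintViewSet, TaskViewSet, UserViewSet, UserDefineViewSet

router = DefaultRouter()
router.register(r'sprints', SprintViewSet)
router.register(r'tasks', TaskViewSet)
router.register(r'users', UserViewSet)
router.register(r'reports', UserDefineViewSet, base_name='report')  # no queryset, so a base name is required

urlpatterns = router.urls
```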
{
"source": "510908220/heartbeats",
"score": 2
} |
#### File: agent/ansible_util/result_collector.py
```python
import time
from collections import defaultdict
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
"""
Reference: https://github.com/ansible/ansible/blob/v2.0.0.2-1/lib/ansible/plugins/callback/default.py
"""
CALLBACK_VERSION = 2.0
def __init__(self):
super(CallbackModule, self).__init__()
self.start_timestamp = time.time()
self.run_time_sec = 0
self.__result_list = defaultdict(lambda:
{
"ok": [],
"unreachable": [],
"skipped": [],
"failed": []
}
)
self.__summary = {}
def v2_runner_on_failed(self, result, ignore_errors=False):
self.__result_list[result._host.get_name()]["failed"].append(result._result)
def v2_runner_on_ok(self, result):
self.__result_list[result._host.get_name()]["ok"].append(result._result)
def v2_runner_on_skipped(self, result):
self.__result_list[result._host.get_name()]["skipped"].append(result._result)
def v2_runner_on_unreachable(self, result):
self.__result_list[result._host.get_name()]["unreachable"].append(result._result)
def v2_playbook_on_stats(self, stats):
self.run_time_sec = time.time() - self.start_timestamp
hosts = sorted(stats.processed.keys())
for h in hosts:
t = stats.summarize(h)
self.summary[h] = t
@property
def summary(self):
return self.__summary
@property
def results(self):
return self.__result_list
```
#### File: heartbeats/agent/script.py
```python
import datetime
import json
import os
import sys
import psutil
def get_memory():
mem = psutil.virtual_memory()
return {
'total': mem.total,
'available': mem.available,
'percent': mem.percent
}
def get_swap_memory():
swap = psutil.swap_memory()
return {
'total': swap.total,
'used': swap.used,
'percent': swap.percent
}
def get_loadavg():
loadavgs = os.getloadavg()
return {
'avg1': float(loadavgs[0]),
'avg5': float(loadavgs[1]),
'avg15': float(loadavgs[2]),
}
def get_cpu_info():
return {
'percent': psutil.cpu_percent(interval=0),
'count': psutil.cpu_count()
}
def get_disk_info():
disk_info = []
for part in psutil.disk_partitions(all=False):
if os.name == 'nt':
if 'cdrom' in part.opts or part.fstype == '':
# skip cd-rom drives with no disk in it; they may raise
# ENOENT, pop-up a Windows GUI error for a non-ready
# partition or just hang.
continue
usage = psutil.disk_usage(part.mountpoint)
disk_info.append({
'device': part.device,
'total': usage.total,
'used': usage.used,
'free': usage.free,
'percent': usage.percent,
'fstype': part.fstype,
'mountpoint': part.mountpoint
})
return disk_info
def get_process_infos():
procs = []
for p in psutil.process_iter():
try:
p.dict = p.as_dict(['username', 'nice', 'memory_info',
'memory_percent', 'cpu_percent',
'cpu_times', 'name', 'cmdline', 'status'])
except psutil.NoSuchProcess:
pass
else:
procs.append(p)
# return processes sorted by CPU percent usage
procs = sorted(procs, key=lambda p: p.dict['memory_percent'],
reverse=True)
filer_cmdlines = [
'nsshd: npaicbbuser@notty',
]
process_infos = []
for p in procs:
cmdline = " ".join(p.dict['cmdline']).strip()
if not cmdline:
continue
filter_flag = False
for filer_cmdline in filer_cmdlines:
if filer_cmdline in cmdline:
filter_flag = True
break
if filter_flag:
continue
process_infos.append(cmdline.strip())
return process_infos
print(json.dumps({
"memory": get_memory(),
# "swap": get_swap_memory(),
# "loadavg": get_loadavg(),
"cpu": get_cpu_info(),
"disk": get_disk_info(),
"process": get_process_infos()
}))
```
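The agent script prints one JSON document to stdout, so a collector can simply run it and parse the output; the script path below is a placeholder.
```python
import json
import subprocess

raw = subprocess.check_output(["python", "agent/script.py"])  # hypothetical path to the script above
stats = json.loads(raw)

print(stats["cpu"]["percent"], stats["memory"]["percent"])
for disk in stats["disk"]:
    print(disk["mountpoint"], disk["percent"])
```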
#### File: 510908220/heartbeats/init.py
```python
import os
import subprocess
import time
import traceback
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
UWSGI_CONFIG_FILE = os.path.join(ROOT_DIR, 'uwsgi.ini')
UWSGI_CONFIG_TEMPLATE = """
[uwsgi]
master=true
socket=127.0.0.1:8000
processes=4
socket-timeout=300
reload-mercy=10
vacuum=true
max-requests=1000
limit-as=1024
listen=128
buffer-size=30000
memory-report=true
home={virtualenv_dir}
chdir=/docker/heartbeats/src
module=heartbeats.wsgi:application
"""
SUPERVISOR_CONF = "/etc/supervisor/conf.d/supervisor-app.conf"
UWSGI_SUPERVISOR_TEMPLATE = """
[program:app-uwsgi]
command = {uwsgi_path} --ini {config}
stopsignal=QUIT
redirect_stderr=true
stderr_logfile_maxbytes=1MB
stdout_logfile=/var/log/uwsgi.log
stdout_logfile_maxbytes=1MB
user=root
"""
NGINX_SUPERVISOR_TEMPLATE = """
[program:nginx-app]
command = /usr/sbin/nginx -g 'daemon off;'
"""
DJANGO_Q_TEMPLATE = """
[program:qcluster]
command={python} {project_dir}/manage.py {command}
directory=/
autostart=true
autorestart=false
stopasgroup=true
killasgroup=true
startretries=0
redirect_stderr=true
stderr_logfile_maxbytes=1MB
stdout_logfile=/var/log/qcluster.log
stdout_logfile_maxbytes=1MB
user=root
"""
CHECK_TEMPLATE = """
[program:check]
command={python} {project_dir}/manage.py check
directory=/
autostart=true
autorestart=false
stopasgroup=true
killasgroup=true
startretries=0
redirect_stderr=true
stderr_logfile_maxbytes=1MB
stdout_logfile=/var/log/check.log
stdout_logfile_maxbytes=1MB
user=root
"""
def get_python():
out = subprocess.check_output('pipenv run which python', shell=True)
return out.decode('utf-8').strip()
def get_uwsgi():
out = subprocess.check_output('pipenv run which uwsgi', shell=True)
return out.decode('utf-8').strip()
def update_django_res(python_path):
cmds = [
"{} src/manage.py makemigrations",
"{} src/manage.py migrate",
"{} src/manage.py collectstatic --noinput"
]
for cmd in cmds:
out = subprocess.check_output(cmd.format(python_path), shell=True)
print(out)
def update_uwsgi_config(python_path):
"""
    Write the virtualenv directory into uwsgi.ini.
"""
with open(UWSGI_CONFIG_FILE, "w") as f:
f.write(
UWSGI_CONFIG_TEMPLATE.format(
virtualenv_dir=os.path.dirname(os.path.dirname(python_path))
)
)
def update_supervisor_config():
"""
    Write the uwsgi and nginx programs into the supervisor config.
"""
uwsgi_config = os.path.join(ROOT_DIR, 'uwsgi.ini')
configs = [NGINX_SUPERVISOR_TEMPLATE]
configs.append(UWSGI_SUPERVISOR_TEMPLATE.format(
config=uwsgi_config,
uwsgi_path=get_uwsgi()
))
configs.append(DJANGO_Q_TEMPLATE.format(
command='qcluster',
python=get_python(),
project_dir=os.path.join(ROOT_DIR, "src")
))
configs.append(CHECK_TEMPLATE.format(
python=get_python(),
project_dir=os.path.join(ROOT_DIR, "src")
))
with open(SUPERVISOR_CONF, "w") as f:
f.write("\n\n".join(configs))
def main():
os.chdir(ROOT_DIR)
python_path = get_python()
update_django_res(python_path)
update_uwsgi_config(python_path)
update_supervisor_config()
if __name__ == "__main__":
main()
```
#### File: src/app/serializers.py
```python
from django.contrib.auth import get_user_model
from rest_framework import serializers
from rest_framework.reverse import reverse
from rest_framework.exceptions import ValidationError
from .models import Tag, Service, Ping
User = get_user_model()
class TagSerializer(serializers.ModelSerializer):
class Meta:
model = Tag
fields = ('id', 'name', 'created', 'updated')
class PingSerializer(serializers.ModelSerializer):
service = serializers.SlugRelatedField(
slug_field='name',
required=False,
allow_null=True,
queryset=Service.objects.all()
)
class Meta:
model = Ping
fields = ('id', 'service', 'remote_addr', 'ua', 'data', 'created')
class ServiceSerializer(serializers.ModelSerializer):
tags = TagSerializer(many=True, read_only=True)
last_ping = serializers.SerializerMethodField()
class Meta:
model = Service
fields = ('id', 'name', 'status', 'tp',
'value', 'grace', 'short_url',
'tags', 'last_ping', 'notify_to',
'created', 'updated')
def get_last_ping(self, obj):
latest_pings = obj.pings.order_by('-id')[:1]
if not latest_pings:
return {}
latest_ping = latest_pings[0]
return PingSerializer(latest_ping).data
```
#### File: src/app/tasks.py
```python
import requests
from django.conf import settings
from django_q.tasks import async_task  # django_q renamed 'async' to async_task when 'async' became a Python keyword
from djmail.template_mail import InlineCSSTemplateMail
def send_djmail(to, service_name, tp, value, grace, msg):
o = InlineCSSTemplateMail('alert')
o.send(to, {
'service_name': service_name,
'tp': tp,
'value': value,
'grace': grace,
'msg': msg
})
def notify(to, service_name, tp, value, grace, msg):
'''
    Notification function. Email only for now; WeChat and other channels can be added later.
'''
send_djmail(to, service_name, tp, value, grace, msg)
def notify_async(to, service_name, tp, value, grace, msg):
# send this message right away
    async_task('app.tasks.notify',
to,
service_name,
tp,
value,
grace,
msg
)
``` |
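From application code, a failed heartbeat would be reported by calling `notify_async`, which only enqueues the email task; the recipients and service values below are placeholders.
```python
from app.tasks import notify_async  # assumed import path

notify_async(
    to=["ops@example.com"],                 # placeholder recipient list
    service_name="payment-api",             # placeholder service
    tp="heartbeat",
    value=5,
    grace=60,
    msg="no ping received within the grace period",
)
```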
{
"source": "510908220/python-code-check",
"score": 2
} |
#### File: management/commands/init_data.py
```python
from django.core.management.base import BaseCommand, CommandError
from django_q.models import Schedule
from django_q.tasks import schedule
class Command(BaseCommand):
help = 'create schedule'
def handle(self, *args, **options):
schedule('app.tasks.update_build_info',
name='update_build_info',
schedule_type=Schedule.MINUTES,
minutes=10,
q_options={
'task_name': 'update_build_info'}
)
```
#### File: python-code-check/src/updatedb.py
```python
import subprocess
import os
import sys
import MySQLdb
import time
import traceback
import stat
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
def wait_db_ok():
def test_db():
db = MySQLdb.connect(os.environ['DB_HOST'], os.environ['DB_USER'], os.environ[
'DB_PASSWORD'], os.environ['DB_NAME'], int(os.environ['DB_PORT']))
cursor = db.cursor()
cursor.execute("SELECT VERSION()")
data = cursor.fetchone()
db.close()
try:
test_db()
return True
except:
print("test db error:", traceback.format_exc())
return False
def update_supervisor_cfg():
print(subprocess.check_output(
"python generate_supervisor_conf.py", shell=True))
def update_db():
cmds = [
"python manage.py makemigrations",
"python manage.py migrate",
"python manage.py collectstatic --noinput"
]
for cmd in cmds:
out = subprocess.check_output(cmd, shell=True)
print(out)
def main():
while not wait_db_ok():
time.sleep(5)
print("db is not ok, wait ....")
old_dir = os.getcwd()
os.chdir(ROOT_DIR)
update_supervisor_cfg()
update_db()
if __name__ == "__main__":
main()
``` |
{
"source": "512mb-xyz/pulseboard",
"score": 3
} |
#### File: 512mb-xyz/pulseboard/pulseboard.py
```python
from pynput import keyboard
import vlc
import re
import random
print("Instantiating the player")
instance = vlc.Instance()
micspam_player = instance.media_player_new()
#get device list
devices = []
mods = micspam_player.audio_output_device_enum()
if mods:
mod = mods
while mod:
mod = mod.contents
devices.append(mod.device)
mod = mod.next
vlc.libvlc_audio_output_device_list_release(mods)
#print the dialogue for selecting the audio output
print("Select audio device: ")
iter = 0
for devname in devices:
print(str(iter)+") "+devname.decode("utf-8"))
iter+=1
device_id = int(input())
while device_id < 0 or device_id > len(devices)-1:
print("No such device")
device_id = int(input())
global device
device = devices[device_id]
#define some functions for help
mode = ""
def exists(dic,key):
try:
return dic[key]
except:
return False
class InputHandler:
def __init__(self):
self.keymap = {}
self.inputbuffer = []
self.mode = ""
def on_press(self,key):
self.inputbuffer.append(key)
if exists(self.keymap,self.mode+"#"+str(self.inputbuffer)):
exec(self.keymap[self.mode+"#"+str(self.inputbuffer)])
def on_release(self,key):
try:
self.inputbuffer.remove(key)
except:
self.inputbuffer.clear()
def bind(self,keymap,comm):
keybind = re.match("(.*#)?(.+)",keymap)
kmode = keybind.group(1) or "#"
name = keyboard.HotKey.parse(keybind.group(2))
self.keymap[kmode[0:-1]+"#"+str(name)] = comm
def unbind(self,keymap):
name = keyboard.HotKey.parse(keymap)
if exists(self.keymap,mode+"#"+str(name)):
self.keymap.pop(mode+"#"+str(name))
def list(self):
for k,v in self.keymap.items():
print(k+": "+v)
def set_mode(self,mode):
self.mode = mode
kmap = InputHandler()
def exec(comm):  # note: shadows the builtin exec(); InputHandler.on_press resolves this module-level function at call time
global device
if comm.startswith("play "):
filename = re.match("play (.+)",comm).group(1)
micspam_player.set_mrl(filename)
micspam_player.audio_output_device_set(None,device)
micspam_player.play()
if comm.startswith("stop"):
micspam_player.stop()
if comm.startswith("mode"):
mode = re.match("mode([^\n]+)",comm).group(1)
mode = mode.lstrip()
kmap.set_mode(mode)
if comm.startswith("list"):
kmap.list()
if comm.startswith("device-list"):
dev_id = 0
for I in devices:
print(str(dev_id)+") "+I.decode("utf-8"))
dev_id+=1
if comm.startswith("device-set "):
        dev_id = re.match(r"device-set (\d+)", comm).group(1)
device = devices[int(dev_id)]
if comm.startswith("volume"):
        volume = re.match(r"volume ([\+\-]?)(\d+)", comm)
current = micspam_player.audio_get_volume()
if volume == None:
print("Volume: "+str(current))
else:
if volume.group(1) == '':
micspam_player.audio_set_volume(int(volume.group(2)))
elif volume.group(1) == '+':
micspam_player.audio_set_volume(current+int(volume.group(2)))
elif volume.group(1) == "-":
micspam_player.audio_set_volume(current-int(volume.group(2)))
print("Volume: "+str(micspam_player.audio_get_volume()))
if comm.startswith("help"):
print("""
Commands:
help - print this message
play # - play the sound to the currently set device
stop - stop the sound
mode <mode> - set the bind mode
list - list all bindings
device-list - list all devices
device-set - set the output device
exit - exit the program
volume [<-+>]<percent> - set volume (example: volume +5, volume 50)
bindings should be set via ./.pulsebrc file in this form:
[mode#<shift>+k]="command"
where <shift>+k is the keybinding you wish to set (special keys should be wrapped in <>)
and command is the command to execute
""")
try:
rc = open("./.pulsebrc")
text = rc.read()
    for match in re.findall(r"\[([^\]]+)\]=\"([^\"]+)\"\n", text):
kmap.bind(match[0],match[1])
except IOError:
print("No pulsebrc found - creating")
rc = open("./.pulsebrc","w")
exit()
running = True
with keyboard.Listener(on_press=kmap.on_press,on_release=kmap.on_release) as listener:
while running:
term_comm = input("> ")
if term_comm.startswith("exit"):
running = False
else:
exec(term_comm)
``` |
{
"source": "514840279/danyuan-application-cloud",
"score": 2
} |
#### File: controller/service/StartService.py
```python
from common.inc_conn import Conn_mysql
from common.HtmlSource import HtmlSource
from common.Rule import Rule
from controller.service.crawler.CrawlerDetialInfoPage import CrawlerDetialInfoPage
from controller.service.crawler.CrawlerListItem import CrawlerListItem
from controller.service.crawler.CrawlerNavigation import CrawlerNavigation
from controller.service.crawler.CrawlerAllPage import CrawlerAllPage
from controller.dao.CrawlerTaskDao import SysCrawlerGroupInfo
# Start monitoring
class StartService():
    # Start the crawler program
def run(self,task):
        ## Fetch the task's group configuration ##############
        sysCrawlerGroupInfo = SysCrawlerGroupInfo()
        groupdata = sysCrawlerGroupInfo.findAllByTaskId(task['uuid'])
        # Decide which crawler to use based on each configured group's page type
        for group in groupdata:
            if group['type'] == 'navigation_bar':  # navigation page
                navigation = CrawlerNavigation()
                navigation.crawlerNavigation(task, group)
            elif group['type'] == 'listPage':  # list page
                listItem = CrawlerListItem()
                # crawl the list page
                listItem.crawlerListPage(task, group)
            elif group['type'] == 'detialPage':  # detail page
                # simple detail-page crawl
                detial = CrawlerDetialInfoPage()
                detial.crawlerDetialPage(task, group)
            elif group['type'] == 'allPage':  # detail page
                # detail page containing multiple groups
                crawlerAllPage = CrawlerAllPage()
                crawlerAllPage.crawlerAllPage(task, group)
def __init__(self):
pass
if __name__ == '__main__':
pass
``` |
{
"source": "5150brien/retsdk",
"score": 3
} |
#### File: retsdk/retsdk/exceptions.py
```python
class ResponseError(Exception):
def __init__(self, response):
self.response = response
def __str__(self):
return self.response
class RequestError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class AuthenticationError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class TransactionError(Exception):
def __init__(self, transaction_type):
self.transaction_type = transaction_type
def __str__(self):
msg = "The transaction '{0}' is not available".format(self.transaction_type)
return msg
```
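Each exception stores its message (or transaction type) and renders it through `__str__`, so callers can print the exception directly; the values below are made up for illustration and the import path is an assumption.
```python
from retsdk.exceptions import (AuthenticationError, RequestError,
                               ResponseError, TransactionError)  # assumed import path

try:
    raise TransactionError("GetObject")          # placeholder transaction name
except TransactionError as err:
    print(err)   # -> The transaction 'GetObject' is not available

try:
    raise AuthenticationError("login rejected by the RETS server")
except (AuthenticationError, RequestError, ResponseError) as err:
    print(err)
```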
#### File: retsdk/tests/test_response_handling.py
```python
import os
import unittest
import xml.etree.ElementTree as ET
from retsdk.utilities import parse_response
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
class TestBadRequestResponseHandling(unittest.TestCase):
"""
Tests handling of RETS server responses for invalid requests
"""
def setUp(self):
response_tree = ET.parse(os.path.join(TEST_DIR, 'bad_request.xml'))
xml = response_tree.getroot()
self.response_dict = parse_response(xml)
def test_return_type(self):
"""
Invalid requests should still return a complete response dict
"""
self.assertIsInstance(self.response_dict, dict)
self.assertEqual(len(self.response_dict), 6)
def test_response_data_payload(self):
"""
The 'rows' value should be an empty list (no data payload returned)
"""
self.assertIsInstance(self.response_dict['rows'], list)
self.assertEqual(len(self.response_dict['rows']), 0)
def test_error_reply_code(self):
"""
Reply code for bad requests should be non-null and non-zero
"""
self.assertIsNotNone(self.response_dict['reply_code'])
self.assertNotEqual(self.response_dict['reply_code'], '')
self.assertNotEqual(self.response_dict['reply_code'], '0')
def test_reply_text(self):
"""
Reply text for bad requests should be non-null
"""
self.assertIsNotNone(self.response_dict['reply_text'])
self.assertNotEqual(self.response_dict['reply_text'], '')
def test_ok_value(self):
"""
The response dict's 'ok' val should be False for bad requests
"""
self.assertFalse(self.response_dict['ok'])
def test_more_rows_value(self):
"""
The response dict's 'more_rows' val should be False for bad requests
"""
self.assertFalse(self.response_dict['more_rows'])
class TestSearchResponseWithNoAdditionalRows(unittest.TestCase):
"""
Tests single-response handling for valid search transactions
"""
def setUp(self):
response_tree = ET.parse(
os.path.join(
TEST_DIR,
'search_response.xml'
)
)
xml = response_tree.getroot()
self.response_dict = parse_response(xml)
def test_response_rows(self):
"""
The response dict should contain a list of values (can be empty)
"""
self.assertIsInstance(self.response_dict['rows'], list)
self.assertGreaterEqual(len(self.response_dict['rows']), 0)
def test_ok_value(self):
"""
The response dict's 'ok' val should be True
"""
self.assertTrue(self.response_dict['ok'])
def test_more_rows_value(self):
"""
The response dict's 'more_rows' val should be False
"""
self.assertFalse(self.response_dict['more_rows'])
class TestSearchResponseWithAdditionalRows(unittest.TestCase):
"""
Tests multiple-response handling of valid search transactions
"""
def setUp(self):
response_tree = ET.parse(
os.path.join(
TEST_DIR, 'search_response_maxrows.xml'
)
)
xml = response_tree.getroot()
self.response_dict = parse_response(xml)
def test_response_rows(self):
"""
The response dict should contain a list of values (can be empty)
"""
self.assertIsInstance(self.response_dict['rows'], list)
self.assertGreaterEqual(len(self.response_dict['rows']), 0)
def test_ok_value(self):
"""
The response dict's 'ok' val should be True
"""
self.assertTrue(self.response_dict['ok'])
def test_more_rows_value(self):
"""
The response dict's 'more_rows' val should be True
"""
self.assertTrue(self.response_dict['more_rows'])
``` |
{
"source": "515783034/Faker",
"score": 2
} |
#### File: faker/utils/api_utils.py
```python
import os
import json
import logging
from utils.file_utils import nameBy,jsonPathBy, loadJson, loadUrl, deleteUrl, renameJson
from utils.jsonModel import jsonModel
from utils.log_utils import logDebug, logError, logInfo
from utils.models import fakerSession
from utils.models import UrlModel, GroupModel, ProjectModel, UrlParamModel
methods = ['POST','GET','HEAD','PUT']
class ApiUtils:
def __init__(self, projectName):
self.project = fakerSession.query(ProjectModel).filter(ProjectModel.name == projectName).first()
if self.project:
print(" \033[32mload config: {}\033[0m".format(projectName))
            print(" \033[32mHome page available at: http://127.0.0.1:5000\033[0m")
else:
            logDebug("failed to load project: {}".format(projectName))
# self._flashDataBase()
def getUrlById(self, urlId):
return fakerSession.query(UrlModel).filter(UrlModel.id == urlId).first()
def getUrlByPath(self, path):
return fakerSession.query(UrlModel).filter(UrlModel.url == path).first()
    # Get a group by its base url (falls back to otherGroup if not found)
def getGroupByUrl(self, groupUrl):
print(self.project.id)
group = fakerSession.query(GroupModel).filter((GroupModel.baseUrl == groupUrl) & (GroupModel.project_id == self.project.id)).first()
print(group.id)
return group if group else self.otherGroup()
'''
    @description: delete a url
'''
def deleteUrl(self, urlId):
urlModel = self.getUrlById(urlId)
logDebug("[api][delete]url:{},{}".format(urlId, urlModel.url))
        # delete the cached json file
deleteUrl(urlModel.url)
fakerSession.delete(urlModel)
fakerSession.commit()
def deleteOtherParam(self, url, method, paramName, existUrls):
basePath = jsonPathBy(url)
directory = os.path.dirname(basePath)
for file in os.listdir(directory):
pathUrl = directory + file
if file.startswith(basePath) and (not pathUrl in existUrls):
logDebug("[api][delete]param: {} {} {}".format(url, paramName, pathUrl))
os.remove(pathUrl)
    # Add a new group
def addGroup(self, groupInfo):
name = groupInfo.get("name")
logDebug("[api][add] group: [{}]".format(name))
group = GroupModel(name=name,desc=name, icon=groupInfo.get("icon"), baseUrl=groupInfo.get("baseUrl"))
self.project.groups.append(group)
fakerSession.commit()
return group
def otherGroup(self):
other = fakerSession.query(GroupModel).filter((GroupModel.desc == "other") & (GroupModel.project_id == self.project.id)).first()
return other if other else self.addGroup({"name": "其他","desc":"other", "icon":"el-icon-connection", "baseUrl":""})
'''
    @description: get all urls (used for restful routing)
@return: [url]
'''
def loadRestfulUrls(self):
urls = []
for group in self.project.groups:
urls.extend([url.url for url in group.urls])
return urls
    # Local json file path for an endpoint that takes a parameter
def pathWithParam(self, url, paramValue):
return jsonPathBy(url).replace(".json", "-{}.json".format(paramValue))
    # Save (create or update) endpoint info to the database
def saveUrl(self, urlId, url, name, method, param, groupUrl):
name = name if name else ""
paramName = param.get("name", "")
paramType = param.get("type", "")
paramValues = param.get("values", [])
if urlId == -1:
if self.getUrlByPath(url):
return "存在同名 url,请检查后重试"
urlModel = UrlModel(url=url, name=name, method=method, param=paramName, paramType=paramType)
self.urlAddParams(urlModel, paramValues)
else:
urlModel = self.getUrlById(urlId)
            # If the url changed, rename the corresponding json file
if urlModel.url != url:
renameJson(urlModel.url, url, urlModel.method)
urlModel.url = url
urlModel.name = name
urlModel.method = method
urlModel.param = paramName
urlModel.paramType = paramType
self.urlAddParams(urlModel, paramValues)
group = self.getGroupByUrl(groupUrl)
group.urls.append(urlModel)
fakerSession.commit()
    # Update the url's parameters
def urlAddParams(self, urlModel, paramValues):
        # remove the old parameters
for oldParam in urlModel.params:
fakerSession.delete(oldParam)
        # add the new parameters
for value in paramValues:
urlParam = UrlParamModel(value=value, url_id=urlModel.id)
urlModel.params.append(urlParam)
'''
    @description: get the detailed info for the endpoint at this url
'''
def getUrlDetail(self,urlId):
if (urlId == -1):
return self._emptyUrlDetail()
urlModel = fakerSession.query(UrlModel).filter(UrlModel.id == urlId).first()
if not urlModel:
return self._emptyUrlDetail()
result = urlModel.toDict()
if urlModel.param != None and len(urlModel.params)>0:
values = []
jsons = []
for param in urlModel.params:
values.append(param.value)
urlPath = self.pathWithParam(urlModel.url, param.value)
jsons.append(loadUrl(urlPath))
result["param_values"] = values
result["param_jsons"] = jsons
result["selectParamValue"] = values[0]
else:
result["content"] = loadUrl(urlModel.url)
urlInfo = {
"methods": self.getMethods(),
"sections": self.getSectionsDesc(),
"urlInfo": result,
"state": "修改" if urlId != -1 else "添加"}
if urlModel:
urlInfo["selectTitle"] = urlModel.group.baseUrl
return urlInfo
    # Default info shown when creating a new endpoint
def _emptyUrlDetail(self):
return {
"methods": self.getMethods(),
"sections": self.getSectionsDesc(),
"urlInfo": {"url": "", "name":"", "desc": "", "method":""},
"state": "添加"}
'''
    @description: get all groups together with their endpoints
@return: [section]
'''
def groupsInfo(self):
groups = []
for group in self.project.groups:
newGroup = group.toDict()
newGroup["urls"] = [x.toDict() for x in group.urls]
groups.append(newGroup)
return groups
'''
    @description: get all supported method names
@return: [method]
'''
def getMethods(self):
return methods
'''
    @description: get basic info for each group
'''
def getSectionsDesc(self):
return [{"name":group.name,"baseUrl":group.baseUrl} for group in self.project.groups]
# def _flashDataBase(self):
# # cachedUrls = []
# for group in self.project.groups:
# if group.id != 5:
# continue
# for urlInfo in group.urls:
# urlInfo.group_id = 15
# fakerSession.commit()
``` |
{
"source": "515783034/HHOfficeOrder",
"score": 2
} |
#### File: HHOfficeOrder/Classes/app.py
```python
from flask import Flask, render_template, request, jsonify
import config
from exts import db, fetch_data, fetch_week, time_desc
from models import OfficeModel, OrderModel
import time
import flask_whooshalchemyplus
app = Flask(__name__)
app.config.from_object(config)
db.init_app(app)
flask_whooshalchemyplus.init_app(app)
app.jinja_env.variable_start_string = '{{ '
app.jinja_env.variable_end_string = ' }}'
with app.app_context():
db.create_all()
@app.route('/')
def hello_world():
weeks = fetch_week()
# 办公室信息
get_office = request.args.get('office')
office = OfficeModel.query.filter(get_office == OfficeModel.name).first()
all_offices = OfficeModel.query.all()
offices = [x.desc for x in all_offices]
if office:
office_desc = office.desc
else:
office_desc = '五味子'
results = fetch_data()
return render_template('date_picker.html', time_span=results, weeks=weeks, offices=offices, select_office=office_desc)
@app.route('/canSelect/')
def can_select():
start = int(request.args.get('start'))
end = int(request.args.get('end'))
week = int(request.args.get('week'))
office = request.args.get('office')
ordered = ordered_time(office, week)
new_orders = []
for aOrder in ordered:
new_orders.extend(range(aOrder.order_from, aOrder.order_to))
select_range = list(range(start, end))
inter = set(select_range).intersection(set(new_orders))
if len(inter) > 0:
return "false"
else:
return "true"
@app.route('/search/')
def search():
search_text = request.args.get('query')
orderes = OrderModel.query.whoosh_search(search_text).all()
ordered = order_convert(orderes)
return render_template('order_list.html', ordered=ordered)
@app.route('/orderlist/')
def order_list():
orderes = OrderModel.query.all()
ordered = order_convert(orderes)
return render_template('order_list.html', ordered=ordered)
def order_convert(orders):
ordered = []
for order in orders:
ord_convert = order.to_dict()
ord_convert['time_desc'] = time_desc(order.order_from, order.order_to + 1, order.order_day)
ordered.append(ord_convert)
return ordered
# Get the time slots that are already booked
@app.route('/getOrdered/')
def get_ordered():
office = request.args.get('office')
week = request.args.get('week')
ordered = ordered_time(office, int(week))
order_json = jsonify({'data': [x.to_dict() for x in ordered]})
return order_json
@app.route('/submit/', methods=['POST'])
def submit():
user_name = request.form.get('user_name')
user_depart = request.form.get('user_depart')
reason = request.form.get('reason')
start_hour = request.form.get('start_hour')
end_hour = request.form.get('end_hour')
select_week = request.form.get('select_week', '0')
office = request.form.get('office').strip()
office_model = OfficeModel.query.filter(OfficeModel.desc == office).first()
if not office_model:
        return 'Office not found: ' + '"' + office + '"'
new_order = OrderModel(user_name=user_name,depart_name=user_depart,reason=reason,order_from=start_hour, order_to=end_hour, office_id=office_model.id)
new_order.order_day = time_offset(select_week)
db.session.add(new_order)
db.session.commit()
flask_whooshalchemyplus.index_one_model(OrderModel)
return ''
# Management page
@app.route('/manage/')
def manage():
return render_template('office_manage.html')
@app.route('/addOffice/', methods=['POST'])
def add_office():
name = request.form.get('name')
desc = request.form.get('desc')
same_names = OfficeModel.query.filter(OfficeModel.name == name).all()
if same_names:
        return 'An office with the name "{}" already exists, skipping add'.format(name)
same_descs = OfficeModel.query.filter(OfficeModel.desc == desc).all()
if same_descs:
return '存在同名desc:{},放弃添加'.format(name)
office1 = OfficeModel(name=name, desc=desc)
db.session.add(office1)
db.session.commit()
return ''
# Timestamp offset by the given number of days
def time_offset(select):
offset = int(select) * 86400
return offset + int(time.time())
# Start and end timestamps of the day at the given offset
def time_duration(offset):
cur_time = time.time()
start = cur_time - cur_time % 86400 + 86400 * offset
return [start, start + 86400]
# Bookings for the given office and day
def ordered_time(office, week):
office_model = OfficeModel.query.filter(OfficeModel.desc == office.strip()).first()
if not office_model:
return []
duration = time_duration(int(week))
ordered = OrderModel.query.filter((OrderModel.office_id == office_model.id) & (OrderModel.order_day > duration[0]) & (OrderModel.order_day <= duration[1])).all()
return ordered
if __name__ == '__main__':
app.run()
``` |
{
"source": "515handwritten-expression/515-handwritten-mathematical-expression",
"score": 3
} |
#### File: 515-handwritten-mathematical-expression/handwritten_math_expression/ImagePreprocessing.py
```python
import xml.etree.ElementTree as ET
from cv2 import cv2
import numpy as np
import glob
import os
import shutil
import pickle
from skimage.morphology import skeletonize
STANDARD_SIZE = 32
symbol = ['alpha','beta','gamma','phi','pi','geq','leq','pm','theta','infty','div','times','sum','ldots','neq','rightarrow','int','sqrt', 'exists','forall','in']
#Read image from file
#Convert original image before image segmentation
def imgReadAndConvert(imgfilename):
original_img = cv2.imread(imgfilename)
# inspect error object
img_copy = original_img.copy()
height = img_copy.shape[0]
width = img_copy.shape[1]
#Image resize(prevent overflow)
    if (height * width * 1000) > 2**31:  # 2**31, not '^' (bitwise XOR)
        resize = img_copy
    elif (height * width) > 2**31:
resize = cv2.resize(img_copy, dsize =(0.0001, int(0.01*height/width)), interpolation = cv2.INTER_AREA)
else:
resize = cv2.resize(img_copy, dsize =(1000, int(1000*height/width)), interpolation = cv2.INTER_AREA)
#GaussianBlur
blur = cv2.GaussianBlur(resize,(5,5),0)
#Image graying: reduce the influence of color
gray = cv2.cvtColor(blur,cv2.COLOR_BGR2GRAY)
#Noises removal from outside the contours
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
img_open = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)
#Covert image into binary
bin_img = cv2.adaptiveThreshold(img_open,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,21,20)
#Reverse image color
bin_img = cv2.bitwise_not(bin_img)
return bin_img
#Get croppped images and skeletonize
def imgSkeleton(original_img):
ret,binary_img = cv2.threshold(original_img,0,1,cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
extracted_img = skeletonize(binary_img)
skeleton = extracted_img.astype(np.uint8) * 255
skeleton = cv2.bitwise_not(skeleton)
return skeleton
def getVerticalProjectionSegmentationPoints(img):
height, width = img.shape[:2]
W = np.zeros(width,dtype=np.int32)
for x in range(0, width):
for y in range(0, height):
if img[y,x] == 0:
W[x] += 1
start = 0
W_Start = []
W_End = []
for j in range(len(W)):
if W[j] > 0 and start ==0:
W_Start.append(j)
start = 1
if W[j] <= 0 and start == 1:
W_End.append(j)
start = 0
if len(W_End) == 0:
W_End.append(width)
return W_Start, W_End
def getHorizontalProjectionSegmentationPoints(img):
height, width = img.shape[:2]
H = np.zeros(height,dtype=np.int32)
for y in range(0, height):
for x in range(0, width):
if img[y,x] == 0:
H[y] += 1
start = 0
H_Start = []
H_End = []
for i in range(len(H)):
if H[i] > 0 and start ==0:
H_Start.append(i)
start = 1
if H[i] <= 0 and start == 1:
H_End.append(i)
start = 0
return H_Start, H_End
#Crop the original image along with the horizontal direction
#For each cropped images, crop the image along with the vertical direction
#Return the final segmenting position
def projectionSegmentation(img):
h,w =img.shape[:2]
Position = []
imgs = []
H_Start, H_End = getHorizontalProjectionSegmentationPoints(img)
#For each cropped images, crop the image along with the vertical direction
for j in range(len(H_Start)):
cropImg = img[H_Start[j]:H_End[j], 0:w]
W_Start, W_End = getVerticalProjectionSegmentationPoints(cropImg)
for x in range(len(W_Start)):
Position.append([W_Start[x],H_Start[j],W_End[x],H_End[j]])
img_aftercrop = img[H_Start[j]:H_End[j], W_Start[x]:W_End[x]]
imgs.append(img_aftercrop)
return imgs, Position
#After projection segmentation, for each cropped image,
#cropped off the white space and only keep the character
#Resize and skeletonize the cropped images and store th images
#and positions into a list of dictionaries
def imgStandardize(imgs,Position):
character_list = []
loc = []
for i in range(len(imgs)):
#Crop horizontally
H_Start, H_End = getHorizontalProjectionSegmentationPoints(imgs[i])
#Crop vertically
W_Start, W_End = getVerticalProjectionSegmentationPoints(imgs[i])
#If projection values are not zero at the very end,
# set w or h as the end point of croping
if(len(W_End)<len(W_Start)):
W_End.append(imgs[i].shape[1])
if(len(W_End) == 0):
W_End.append(imgs[i].shape[1])
if(len(H_End)<len(H_Start)):
H_End.append(imgs[i].shape[0])
if(len(H_End) == 0):
H_End.append(imgs[i].shape[0])
char_img = imgs[i][H_Start[0]:H_End[-1],W_Start[0]:W_End[-1]]
h, w = char_img.shape[:2]
W1 = Position[i][0]
H1 = Position[i][1]
W2 = Position[i][2]
H2 = Position[i][3]
loc.append([W1+W_Start[0],H1+H_Start[0],W2-(imgs[i].shape[1]-W_End[-1]),H2-(imgs[i].shape[0]-H_End[-1])])
#Resize the image to the standard size of 32 * 32
standard_background = np.zeros((STANDARD_SIZE,STANDARD_SIZE),np.uint8)
if(h > w):
img_resize = cv2.resize(char_img, ((int)(w * STANDARD_SIZE / h), STANDARD_SIZE), interpolation=cv2.INTER_AREA)
difference = int((STANDARD_SIZE - img_resize.shape[1]) / 2)
img_resize = cv2.bitwise_not(img_resize)
standard_background[0:STANDARD_SIZE, difference:img_resize.shape[1] + difference] = img_resize
else:
if((int)(h * STANDARD_SIZE / w)<1):
new_h = 1
else:
new_h = (int)(h * STANDARD_SIZE / w)
img_resize = cv2.resize(char_img, (STANDARD_SIZE, new_h), interpolation=cv2.INTER_AREA)
difference = int((STANDARD_SIZE - img_resize.shape[0]) / 2)
img_resize = cv2.bitwise_not(img_resize)
standard_background[difference:img_resize.shape[0] + difference, 0:STANDARD_SIZE] = img_resize
standard_img = cv2.bitwise_not(standard_background)
#Skeletonize the image
standard_img = imgSkeleton(standard_img)
character_list.append(standard_img)
standard_imgs=[]
#Store the location and segment images in a list
for i in range(len(character_list)):
standard_imgs.append({'location':loc[i],'segment_img':character_list[i]})
#Sort the images along with the horizontal direction
#standard_imgs.sort(key=lambda x:x['location'][0])
return standard_imgs
#Segment test images
'''def testImageSegementation(filepath):
files = glob.glob(filepath)
# path is 'data/testData/*.png'
num = 0
for filename in files:
fileid = filename[13:-4]
num += 1
print(fileid + ':successfully loaded' + '-' + str(num))
#Read in original image
#Convert image
binimg = imgReadAndConvert(filename)
cropimgs, Position = projectionSegmentation(binimg)
imgs = imgStandardize(cropimgs,Position)
img_list = []
img_loc = []
for i in range(len(imgs)):
img_list.append(imgs[i]['segment_img'])
img_loc.append(imgs[i]['location'])
path = os.path.join('data/testPNGSeg', fileid)
if(os.path.exists(path)):
shutil.rmtree(path)
os.mkdir(path)
for index, img in enumerate(img_list, start=1):
if index < 10:
strindex = '0' + str(index)
else:
strindex = str(index)
imgpath = path + '/' + strindex + '.png'
cv2.imwrite(imgpath, img)
#Store the position as a pickle file
picklepath = path + '/' + fileid + '.pkl'
with open(picklepath,"wb") as f_dump:
pickle.dump(img_loc, f_dump)
f_dump.close()'''
# Read ground truth from inkml file (for training)
def readCharacterListFromInkmlFile(filename):
ground_truth = [] # start with $, end with $
with open(filename, 'r') as file:
tree = ET.parse(file)
root = tree.getroot()
for annotation in root.findall('{http://www.w3.org/2003/InkML}annotation'):
if (annotation.get('type')) == 'truth':
ground_truth.append(annotation.text)
parsedgt = groundTruthParser(ground_truth)
return parsedgt
# Parse ground truth into a list of symbols
def groundTruthParser(ground_truth):
gt = ground_truth[0]
gt = gt.replace(' ','')
gt = gt.replace('$','')
gt = gt.replace('\\gt','>')
gt = gt.replace('\\lt','<')
gt = gt.replace('\\log','log')
gt = gt.replace('\\cos','cos')
gt = gt.replace('\\tan','tan')
gt = gt.replace('\\sin','sin')
gt = gt.replace('\\lim','lim')
gt_list = []
for i in gt:
if(len(gt)==0):
break
i = gt[0]
gt = gt[1:]
if(i == '\\'):
for s in symbol:
if(gt[0:len(s)] == s):
gt_list.append(s)
gt = gt[len(s):]
else:
gt_list.append(i)
return gt_list
# Segment training data and label the character with according ground truth
'''def trainImageSegementation(inkmlfilepath):
files = glob.glob(inkmlfilepath)
num = 0
for inkmlfilename in files:
num +=1
# path = 'data/trainData/*.inkml'
fileid = inkmlfilename[15:-6]
pngfilename = 'data/trainPNG/' + fileid + '.png'
if(not os.path.isfile(pngfilename)):
print(fileid +': PNG file not exist' + '-' + str(num))
continue
print(fileid + ':successfully loaded' + '-' + str(num))
ground_truth = readCharacterListFromInkmlFile(inkmlfilename)
binimg = imgReadAndConvert(pngfilename)
cropimgs, Position = projectionSegmentation(binimg)
imgs = imgStandardize(cropimgs,Position)
if(len(imgs) == len(ground_truth)):
for i in range(len(imgs)):
path = os.path.join('data/trainPNGSeg', ground_truth[i])
if(os.path.exists(path)):
pathpng = path + '/*.png'
imgpath = path + '/' + ground_truth[i] + '_' + str(len(glob.glob(pathpng))+1) + '.png'
else:
os.mkdir(path)
imgpath = path + '/' + ground_truth[i] + '_1.png'
cv2.imwrite(imgpath, imgs[i]['segment_img'])'''
#filepath = 'data/trainData/*.inkml'
#trainImageSegementation(filepath)
#filepath = 'data/predPNG/*.png'
#testImageSegementation(filepath)
# %
```
#### File: 515-handwritten-mathematical-expression/tests/test_stringCalculation.py
```python
import unittest
import handwritten_math_expression.stringCalculation as XTC
from handwritten_math_expression.stringCalculation import IncalculableError
import os.path, math
class TestCalculation(unittest.TestCase):
def testNode(self):
node = XTC.Node("1")
self.assertEqual(node.value,"1")
self.assertEqual(node.left,None)
self.assertEqual(node.right, None)
def testConstant(self):
self.assertEqual(XTC.expressionTree("pi"),3.1416)
self.assertEqual(XTC.expressionTree("e"),2.7183)
self.assertEqual(XTC.expressionTree("1"), 1)
self.assertEqual(XTC.expressionTree(""), 0)
def testCalculationPlus(self):
self.assertEqual(XTC.expressionTree("1+2"),3.0)
self.assertEqual(XTC.expressionTree("1.5+2"),3.5)
self.assertEqual(XTC.expressionTree("pi+1"),4.1416)
def testCalculationMinus(self):
self.assertEqual(XTC.expressionTree("1-2"),-1)
self.assertEqual(XTC.expressionTree("1-(0-2)"),3)
self.assertEqual(XTC.expressionTree("1-((0-2))"),3)
self.assertEqual(XTC.expressionTree("1.1-0.2"),0.9)
def testCalculationTimes(self):
self.assertEqual(XTC.expressionTree("1*2"),2)
self.assertEqual(XTC.expressionTree("2*0"),0)
self.assertEqual(XTC.expressionTree("(0*0)"),0)
self.assertEqual(XTC.expressionTree("1.1*0.2"),0.22)
def testCalculationDiv(self):
self.assertEqual(XTC.expressionTree("(1/(1+1))"),0.5)
self.assertEqual(XTC.expressionTree("0/2"),0)
def testCalculationPower(self):
self.assertEqual(XTC.expressionTree("1^2"),1.0)
self.assertEqual(XTC.expressionTree("2^2^2"),16.0)
def testInfixToPostfixNo1(self):
result = []
expected = ['3', '1', '-', '1', '^']
postfix = XTC.infixToPostfix("(3-1)^1")
for node in postfix:
result += node.value
self.assertCountEqual(result, expected)
self.assertListEqual(result, expected)
def testInfixToPostfixNo2(self):
result = []
expected = ['1', '2', '^', '2', '+', '3', '*', '1', '/']
postfix = XTC.infixToPostfix("(1^2+2)*3/1")
for node in postfix:
result += node.value
self.assertCountEqual(result, expected)
self.assertListEqual(result, expected)
    def testInfixToPostfixNo3(self):
result = []
expected = []
postfix = XTC.infixToPostfix("")
for node in postfix:
result += node.value
self.assertCountEqual(result, expected)
self.assertListEqual(result, expected)
def testIncalculableError(self):
self.assertRaises(IncalculableError, XTC.expressionTree, "2/0")
self.assertRaises(IncalculableError, XTC.expressionTree, "2neq0")
self.assertRaises(IncalculableError, XTC.expressionTree, "1+")
self.assertRaises(IncalculableError, XTC.expressionTree, "^1")
self.assertRaises(IncalculableError, XTC.expressionTree, "x=1")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "515hikaru/essence-of-machine-learning",
"score": 3
} |
#### File: numeric_calc/quad_equation/qeq.py
```python
import numpy as np
def qeq(a, b, c):
"""
Solve quadratic equation.
"""
d = np.sqrt(b**2 - 4 * a * c)
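    # Square root of the discriminant; for b**2 < 4*a*c this is nan
    # (complex roots are not handled here).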
return ((-b + d) / (2 * a)), ((-b - d)/ (2 * a))
def qeq2(a, b, c):
"""
Solve quadratic equation with less error.
"""
d = np.sqrt(b**2 - 4 * a * c)
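    # Avoid catastrophic cancellation: take the root where -b and -sign(b)*d add
    # rather than cancel, then recover the other root from Vieta's relation
    # alpha * beta = c / a.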
alpha = (-b - np.sign(b) * d) / (2 * a)
beta = c / (a * alpha)
return alpha, beta
```
#### File: essence-of-machine-learning/tests/test_qeq.py
```python
import unittest
import pytest
from numeric_calc.quad_equation import qeq
class TestQuadEquation(unittest.TestCase):
"""
2次方程式を解くテスト
"""
def test_solve_simple_eq(self):
"""x^2+5x+6を解く"""
eps = 1e-18
alpha, beta = qeq.qeq(1, 5, 6)
self.assertAlmostEqual(alpha, -2.0, delta=eps)
self.assertAlmostEqual(beta, -3.0, delta=eps)
def test_solve_simple_eq2(self):
"""x^2-5x+6を解く"""
eps = 1e-18
alpha, beta = qeq.qeq(1, -5, 6)
self.assertAlmostEqual(alpha, 3.0, delta=eps)
self.assertAlmostEqual(beta, 2.0, delta=eps)
def test_solve_less_error_eq(self):
"""x^2+5x+6を解く"""
eps = 1e-18
alpha, beta = qeq.qeq2(1, 5, 6)
self.assertAlmostEqual(alpha, -3.0, delta=eps)
self.assertAlmostEqual(beta, -2.0, delta=eps)
def test_solve_less_error_eq2(self):
"""x^2-5x+6を解く"""
eps = 1e-18
alpha, beta = qeq.qeq2(1, -5, 6)
self.assertAlmostEqual(alpha, 3.0, delta=eps)
self.assertAlmostEqual(beta, 2.0, delta=eps)
@pytest.mark.xfail
def test_solve_error_large_fail(self):
"""
誤差が大きくなるテストケース
誤差対策ができていないケースなのでこのテストは失敗する
(x+0.000000001)(x+1) = 0
"""
eps = 1.0 * (10 ** (-18))
alpha, beta = qeq.qeq(1, 1.000000001, 0.000000001)
self.assertAlmostEqual(beta, -1.0, delta=eps)
self.assertAlmostEqual(alpha, -0.000000001, delta=eps) # fail here
def test_solve_error_large(self):
"""
誤差が大きくなるテストケース
(x+0.000000001)(x+1) = 0
"""
eps = 1e-18
alpha, beta = qeq.qeq2(1, 1.000000001, 0.000000001)
self.assertAlmostEqual(alpha, -1.0, delta=eps)
self.assertAlmostEqual(beta, -0.000000001, delta=eps)
``` |
{
"source": "515hikaru/junk-code",
"score": 3
} |
#### File: python_injector/cache/cache.py
```python
from abc import ABCMeta, abstractmethod
from typing import Any, Dict
from injector import inject
class ICache(metaclass=ABCMeta):
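    # Abstract cache interface: concrete caches must implement get and set.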
    @abstractmethod
def get(self, key: str):
pass
    @abstractmethod
def set(self, key: str, value: Any):
pass
class DictCache(ICache):
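    # Minimal in-memory ICache implementation backed by a plain dict
    # (pre-seeded with one dummy entry).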
def __init__(self):
self.content = {'key': 'value'}
def get(self, key: str) -> Any:
return self.content.get(key, None)
def set(self, key: str, value: Any) -> None:
self.content[key] = value
``` |