metadata (dict) | text (string, length 60 to 3.49M) |
---|---|
{
"source": "jkbradley/spark-ml",
"score": 3
} |
#### File: python/ml/parameters.py
```python
class Param:
def __init__(self, parent, name, doc, defaultValue):
self.parent = parent
self.name = name
self.doc = doc
self.defaultValue = defaultValue
def withValue(self, value):
return ParamPair(self, value)
def __str__(self):
return "{0}/{1}: {2} (default: {3})".format(self.parent, self.name, self.doc, self.defaultValue)
def __repr__(self):
return "{0}/{1}".format(self.parent, self.name)
class ParamPair:
def __init__(self, param, value):
assert isinstance(param, Param)
self.param = param
self.value = value
class ParamMap:
def __init__(self):
self.params = {}
def put(self, param, value):
self.params[param] = value
return self
def getOrDefault(self, param):
return self.params[param] if param in self.params else param.defaultValue
def copy(self):
newMap = ParamMap()
newMap.params = self.params.copy()
return newMap
def __repr__(self):
return self.params.__repr__()
class ParamGridBuilder:
def __init__(self):
self.paramGrid = {}
def add(self, param, value):
return self.addMulti(param, [value,])
def addMulti(self, param, values):
self.paramGrid[param] = values
return self
def build(self):
paramMaps = [ParamMap(),]
for (param, values) in self.paramGrid.items():
newParamMaps = []
for paramMap in paramMaps:
for v in values:
newParamMap = paramMap.copy()
newParamMap.put(param, v)
newParamMaps.append(newParamMap)
paramMaps = newParamMaps
return paramMaps
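# Usage sketch (illustrative only; the parent/parameter names below are hypothetical,
# not taken from the Spark ML pipeline API). ParamGridBuilder.build() takes the
# Cartesian product of all supplied values, so 1 x 2 values yield 2 ParamMaps here.
if __name__ == "__main__":
    maxIter = Param("lr", "maxIter", "maximum number of iterations", 10)
    regParam = Param("lr", "regParam", "regularization constant", 0.0)
    grid = (ParamGridBuilder()
            .add(maxIter, 20)
            .addMulti(regParam, [0.01, 0.1])
            .build())
    for paramMap in grid:
        # getOrDefault falls back to the Param's default when no value was set
        print(paramMap.getOrDefault(maxIter), paramMap.getOrDefault(regParam))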
``` |
{
"source": "jkbrandt/loudml",
"score": 2
} |
#### File: loudml/loudml/dummystorage.py
```python
from .storage import Storage
class DummyStorage(Storage):
"""
Dummy Loud ML storage for testing
"""
def model_exists(self, name):
return False
def template_exists(self, name):
return False
def get_model_data(self, name, ckpt_name=None):
return {}
def get_template_data(self, name):
return {}
def list_models(self):
return []
def list_checkpoints(self, name):
return []
def list_templates(self):
return []
def create_model(self, model):
pass
def delete_model(self, name):
pass
def create_template(self, template):
pass
def delete_template(self, name):
pass
def save_model(self, model, save_state=True):
pass
def save_state(self, model, ckpt_name=None):
pass
def set_current_ckpt(self, model_name, ckpt_name):
pass
def get_current_ckpt(self, model_name):
return None
def load_model(self, name, ckpt_name=None):
return None
def load_model_from_template(self, _name, *args, **kwargs):
return None
def get_model_hook(self, model_name, hook_name):
return None
def list_model_hooks(self, model_name):
return []
def set_model_hook(self, model_name, hook_name, hook_type, config=None):
pass
def delete_model_hook(self, model_name, hook_name):
pass
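# Usage sketch: in the test-suite (see tests/test_metrics.py below) individual methods
# are stubbed per test, e.g.
#     storage = DummyStorage()
#     storage.list_models = MagicMock(return_value=['foo', 'bar'])
# so DummyStorage only needs to provide the Storage interface with inert defaults.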
```
#### File: loudml/loudml/errors.py
```python
class LoudMLException(Exception):
"""Loud ML exception"""
code = 500
def __init__(self, msg=None):
super().__init__(msg or self.__doc__)
class Conflict(LoudMLException):
"""Conflict"""
code = 409
class BucketError(LoudMLException):
"""Error occured on bucket query"""
code = 500
def __init__(self, bucket, error=None):
self.bucket = bucket
self.error = error or self.__doc__
def __str__(self):
return "bucket[{}]: {}".format(self.bucket, self.error)
class BucketNotFound(BucketError):
"""Bucket not found"""
code = 404
def __str__(self):
return "{} (name = '{}')".format(self.error, self.bucket)
class Invalid(LoudMLException):
"""Data is invalid"""
code = 400
def __init__(self, error, name=None, path=None, hint=None):
self.error = error
self.name = name
self.path = path
self.hint = hint
def __str__(self):
hint = "" if self.hint is None else " ({})".format(self.hint)
if self.path is None or len(self.path) == 0:
return "{} is invalid: {}{}".format(
self.name or "data",
self.error,
hint,
)
else:
path = '.'.join([str(key) for key in self.path])
return "invalid field {}: {}{}".format(path, self.error, hint)
class LimitReached(LoudMLException):
"""Limit reached"""
code = 429
class ModelExists(LoudMLException):
"""Model exists"""
code = 409
class ModelNotFound(LoudMLException):
"""Model not found"""
code = 404
def __init__(self, name=None, version=None):
self.name = name
self.version = version
def __str__(self):
if self.version and self.name:
name = " ({} version {})".format(self.name, self.version)
else:
name = "" if self.name is None else " ({})".format(self.name)
return "Model{} not found".format(name)
class ModelNotTrained(LoudMLException):
"""Model not trained"""
code = 400
class UnsupportedBucket(LoudMLException):
"""Unsupported bucket type"""
code = 501
def __init__(self, bucket_type, error=None):
self.bucket_type = bucket_type
self.error = error or self.__doc__
def __str__(self):
return "{} (type = '{}')".format(self.error, self.bucket_type)
class UnsupportedMetric(LoudMLException):
"""Unsupported metric"""
code = 501
def __init__(self, metric, error=None):
self.metric = metric
self.error = error or self.__doc__
def __str__(self):
return "{} (type = '{}')".format(self.error, self.metric)
class UnsupportedModel(LoudMLException):
"""Unsupported model"""
code = 501
def __init__(self, model_type, error=None):
self.model_type = model_type
self.error = error or self.__doc__
def __str__(self):
return "{} (type = '{}')".format(self.error, self.model_type)
class Forbidden(LoudMLException):
"""Forbidden"""
code = 403
class NotFound(LoudMLException):
"""Not found"""
code = 404
class NoData(NotFound):
"""No data"""
class TransportError(LoudMLException):
"""
    Exception raised when LML returns a non-OK (>= 400) HTTP status code,
    or when an actual connection error happens; in that case the
``status_code`` will be set to ``'N/A'``.
"""
code = 503
@property
def status_code(self):
"""
The HTTP status code of the response that precipitated the error or
``'N/A'`` if not applicable.
"""
return self.args[0]
@property
def error(self):
""" A string error message. """
return self.args[1]
@property
def info(self):
"""
        Dict of error info returned by LML where available; otherwise the
        underlying exception.
"""
return self.args[2]
def __str__(self):
cause = ''
try:
if self.info:
cause = ', %r' % self.info['error']['root_cause'][0]['reason']
except LookupError:
pass
return '%s(%s, %r%s)' % (
self.__class__.__name__, self.status_code, self.error, cause)
class ConnectionError(TransportError):
"""
Error raised when there was an exception while talking to LML. Original
exception from the underlying :class:`~elasticsearch.Connection`
    implementation is available as ``.info``.
"""
def __str__(self):
return 'ConnectionError(%s) caused by: %s(%s)' % (
self.error, self.info.__class__.__name__, self.info)
class SSLError(ConnectionError):
""" Error raised when encountering SSL errors. """
class ConnectionTimeout(ConnectionError):
""" A network timeout. Doesn't cause a node retry by default. """
def __str__(self):
return 'ConnectionTimeout caused by - %s(%s)' % (
self.info.__class__.__name__, self.info)
```
#### File: loudml/loudml/mongo.py
```python
import logging
import math
import numpy as np
import pymongo
from voluptuous import (
All,
Length,
Optional,
Required,
)
from . import (
errors,
schemas,
)
from .misc import (
make_ts,
parse_addr,
)
from loudml.bucket import Bucket
def _tk(key):
return "$" + key
def _build_query(feature, timestamp_field, boundaries):
field = feature.field
metric = feature.metric
group_by = _tk(timestamp_field)
query = []
if feature.match_all:
match = []
for tag in feature.match_all:
k, v = tag['tag'], tag['value']
match.append({k: v})
query.append({'$match': {'$or': match}})
if metric == "count":
return query + [
{'$match': {field: {'$exists': True}}},
{'$bucket': {
'groupBy': group_by,
'boundaries': boundaries,
'default': None,
'output': {feature.name: {'$sum': 1}},
}}
]
if metric == "mean":
metric = "avg"
return query + [
{'$bucket': {
'groupBy': group_by,
'boundaries': boundaries,
'default': None,
'output': {feature.name: {
_tk(metric): _tk(field),
}}
}}
]
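# For example (assuming the bucket's timestamp field is named "timestamp"), a feature
# with metric "mean" on field "duration", no match_all tags, and boundaries [0, 10, 20]
# yields the single-stage pipeline:
#   [{'$bucket': {'groupBy': '$timestamp', 'boundaries': [0, 10, 20], 'default': None,
#                 'output': {'<feature.name>': {'$avg': '$duration'}}}}]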
def catch_query_error(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except (
pymongo.errors.PyMongoError
) as exn:
raise errors.BucketError(self.name, str(exn))
return wrapper
class MongoBucket(Bucket):
"""
MongoDB bucket
"""
SCHEMA = Bucket.SCHEMA.extend({
Required('addr'): str,
Required('database'): str,
Required('collection'): schemas.key,
Optional('username'): All(schemas.key, Length(max=256)),
Optional('password'): str,
Optional('auth_source'): str,
})
def __init__(self, cfg):
cfg['type'] = 'mongodb'
super().__init__(cfg)
self._client = None
self._db = None
self._pending = {}
self._nb_pending = 0
@property
def collection(self):
return self.cfg['collection']
@property
def client(self):
if self._client is None:
            # MongoDB listens on 27017 by default
            addr = parse_addr(self.cfg['addr'], default_port=27017)
logging.info(
"connecting to mongodb on %s:%d, using database '%s'",
addr['host'],
addr['port'],
self.cfg['database'],
)
kwargs = {}
username = self.cfg.get('username')
if username:
kwargs['username'] = username
kwargs['password'] = self.cfg.get('password')
auth_src = self.cfg.get('auth_source')
if auth_src:
kwargs['authSource'] = auth_src
self._client = pymongo.MongoClient(
host=addr['host'],
port=addr['port'],
**kwargs
)
return self._client
@property
def db(self):
if self._db is None:
self._db = self.client[self.cfg['database']]
return self._db
@catch_query_error
def init(self, *args, **kwargs):
return
@catch_query_error
def drop(self, db=None):
self.client.drop_database(db or self.cfg['database'])
def nb_pending(self):
return self._nb_pending
def enqueue(self, collection, request):
if collection not in self._pending:
self._pending[collection] = []
self._pending[collection].append(request)
self._nb_pending += 1
def clear_pending(self):
self._pending = {}
def insert_data(
self,
data,
tags=None,
):
if tags is not None:
for tag, tag_val in tags.items():
data[tag] = tag_val
self.enqueue(self.collection, pymongo.InsertOne(data))
def insert_times_data(
self,
ts,
data,
tags=None,
*args,
**kwargs
):
"""
Insert data
"""
ts = make_ts(ts)
data = data.copy()
data[self.timestamp_field] = ts
self.insert_data(data, tags=tags)
@catch_query_error
def send_bulk(self, pending):
"""
Send data to MongoDB
"""
for collection, requests in pending.items():
self.db[collection].bulk_write(requests)
@catch_query_error
def get_times_data(
self,
bucket_interval,
features,
from_date,
to_date,
):
bucket_interval = int(bucket_interval)
from_ts = int(math.floor(make_ts(from_date) /
bucket_interval) * bucket_interval)
to_ts = int(math.ceil(make_ts(to_date) /
bucket_interval) * bucket_interval)
boundaries = list(
range(from_ts, to_ts + bucket_interval, bucket_interval))
nb_buckets = len(boundaries)
buckets = np.full((nb_buckets, len(features)),
np.nan, dtype=float)
nb_buckets_found = 0
for i, feature in enumerate(features):
query = _build_query(feature, self.timestamp_field, boundaries)
resp = self.db[self.collection].aggregate(query)
for entry in resp:
ts = entry['_id']
if ts is None:
continue
value = entry[feature.name]
j = int((ts - from_ts) / bucket_interval)
buckets[j][i] = value
if j >= nb_buckets_found:
nb_buckets_found = j + 1
if nb_buckets_found == 0:
raise errors.NoData()
result = []
ts = from_ts
for bucket in buckets[0:nb_buckets_found]:
result.append((ts - from_ts, list(bucket), ts))
ts += bucket_interval
return result
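# Usage sketch (hedged: the base Bucket.SCHEMA may require additional keys such as
# 'name'; the values below are illustrative and assume a local MongoDB instance):
#     bucket = MongoBucket({
#         'name': 'mongo-example',
#         'addr': 'localhost:27017',
#         'database': 'metrics',
#         'collection': 'requests',
#     })
#     bucket.insert_times_data(ts='now', data={'duration': 12.5}, tags={'host': 'web-1'})
#     bucket.send_bulk(bucket._pending)   # flushes the queued pymongo.InsertOne requests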
```
#### File: loudml/loudml/schemas.py
```python
import loudml.errors
from voluptuous import (
All,
Any,
Boolean,
Invalid,
Length,
Match,
message,
Required,
Optional,
Range,
Schema,
)
import voluptuous as vol
from urllib.parse import urlparse
from .misc import (
make_ts,
parse_timedelta,
)
key = All(
str,
Length(min=1),
Match("^[a-zA-Z0-9-_@]+$"),
)
time_str_key = All(
str,
Length(min=1),
Match("^[:0-9]+$"),
)
dotted_key = All(
str,
Length(min=1),
Match("^[a-zA-Z0-9-_@.]+$"),
)
bracket_key = All(
str,
Length(min=1),
Match("^{{[a-zA-Z0-9-_@.]+}}$"),
)
seasonality = Schema({
Optional('daytime', default=False): Boolean(),
Optional('weekday', default=False): Boolean(),
})
score = Any(All(Any(int, float), Range(min=0, max=100)), None)
class Url:
"""Validate an URL."""
def __init__(self, **kwargs):
self._kwargs = kwargs
def __call__(self, v):
url_in = str(v)
res = urlparse(url_in)
if len(res.fragment) or len(res.query) or len(res.scheme):
raise vol.Invalid(
'You have attempted to access a restricted URL, the URL contains invalid data.') # noqa
if not len(res.path) or res.path[0] != '/':
raise vol.Invalid(
'You have attempted to access a restricted URL, the URL contains invalid path.') # noqa
return res.path
ScheduledJob = Schema({
Required('name'): All(str, Length(max=256)),
Required('method'): Any('head', 'get', 'post', 'patch', 'delete'),
Required('relative_url'): All(str, Url()),
Optional('params'): Schema({str: Any(int, float, str, bool)}),
Optional('json'): Schema({str: Any(int, float, str, bool)}),
Required('every'): Schema({
Required('count'): Any(int, float),
Required('unit'): Any(
'second',
'seconds',
'minute',
'minutes',
'hour',
'hours',
'day',
'days',
'week',
'weeks',
'monday',
'tuesday',
'wednesday',
'thursday',
'friday',
'saturday',
'sunday',
),
Optional('at'): All(time_str_key, Length(max=256)),
}),
})
class TimeDelta:
"""
Schema for time-delta
"""
def __init__(self, **kwargs):
self._kwargs = kwargs
def __call__(self, v):
parse_timedelta(v, **self._kwargs)
return v
@message('expected absolute or relative date', cls=Invalid)
def Timestamp(v):
"""
Schema for timestamps
"""
try:
make_ts(v)
except TypeError:
raise ValueError("value expected")
return v
def validate(schema, data, name=None):
"""
Validate data against a schema
"""
try:
return schema(data)
except Invalid as exn:
raise loudml.errors.Invalid(
exn.error_message,
name=name,
path=exn.path,
)
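# Usage sketch: module-level validators such as `key` are plain voluptuous callables,
# so they can be passed to validate() directly (see tests/test_schemas.py below).
if __name__ == "__main__":
    print(validate(key, "my-model_01", name="model name"))   # returns the validated value
    try:
        validate(key, "bad/name", name="model name")
    except loudml.errors.Invalid as exn:
        print(exn)   # e.g. "model name is invalid: ..."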
```
#### File: loudml/tests/test_filestorage.py
```python
from loudml.filestorage import FileStorage
from loudml.donut import DonutModel
from loudml import (
errors,
)
import loudml.vendor
import datetime
import logging
import tempfile
import unittest
logging.getLogger('tensorflow').disabled = True
FEATURES = [
{
'name': 'avg_foo',
'metric': 'avg',
'field': 'foo',
'default': 0,
},
]
class TestFileStorage(unittest.TestCase):
def test_create_and_list(self):
with tempfile.TemporaryDirectory() as tmp:
storage = FileStorage(tmp)
# Create
model = DonutModel(dict(
name='test-1',
offset=30,
span=300,
bucket_interval=3,
interval=60,
features=FEATURES,
max_threshold=70,
min_threshold=60,
))
self.assertEqual(model.type, 'donut')
storage.create_model(model)
self.assertTrue(storage.model_exists(model.name))
# Create
model = DonutModel(dict(
name='test-2',
offset=56,
span=200,
bucket_interval=20,
interval=120,
features=FEATURES,
max_threshold=70,
min_threshold=60,
))
storage.create_model(model)
# List
self.assertEqual(storage.list_models(), ["test-1", "test-2"])
# Delete
storage.delete_model("test-1")
self.assertFalse(storage.model_exists("test-1"))
self.assertEqual(storage.list_models(), ["test-2"])
with self.assertRaises(errors.ModelNotFound):
storage.get_model_data("test-1")
# Rebuild
model = storage.load_model("test-2")
self.assertEqual(model.type, 'donut')
self.assertEqual(model.name, 'test-2')
self.assertEqual(model.offset, 56)
```
#### File: loudml/tests/test_metrics.py
```python
import unittest
from unittest import mock
from unittest.mock import MagicMock
from loudml.metrics import send_metrics
from loudml.metrics import MyConfigParser
from loudml.dummystorage import DummyStorage
def mocked_get_distribution(*args, **kwargs):
class distribution:
version = '1.5'
return distribution()
def mocked_requests_post(*args, **kwargs):
pass
class TestMetrics(unittest.TestCase):
@mock.patch('pkg_resources.get_distribution',
side_effect=mocked_get_distribution)
@mock.patch('requests.post', side_effect=mocked_requests_post)
def test_send_metrics(self, mock_get, mock_get2):
config = {'enable': True}
storage = DummyStorage()
storage.list_models = MagicMock(return_value=['foo', 'bar'])
MyConfigParser.read = MagicMock()
MyConfigParser.get = MagicMock(return_value='CentOS')
send_metrics(config, storage)
storage.list_models.assert_called()
```
#### File: loudml/tests/test_schemas.py
```python
import unittest
import loudml.vendor
from loudml import (
errors,
schemas,
)
class TestSchemas(unittest.TestCase):
def valid(self, value):
schemas.validate(self.schema, value)
def invalid(self, value):
with self.assertRaises(errors.Invalid):
schemas.validate(self.schema, value)
def test_key(self):
self.schema = schemas.key
self.valid("foo")
self.valid("foo_bar")
self.valid("Foo-Bar")
self.valid("00_foo_00_bar_001")
self.valid("_foo")
self.invalid("")
self.invalid("foo/bar")
self.invalid(".foo")
def test_timestamp(self):
self.schema = schemas.Timestamp()
self.valid("now")
self.valid("now-1d")
self.valid("2018-01-08T09:39:26.123Z")
self.valid("1515404366.123")
self.valid(1515404366.123)
self.invalid("")
self.invalid(None)
self.invalid("foo")
``` |
{
"source": "jkbren/networks-and-dataviz",
"score": 3
} |
#### File: jkbren/networks-and-dataviz/tail_estimation.py
```python
import sys
import time
import argparse
import os
import warnings
import numpy as np
from matplotlib import pyplot as plt
# =========================================
# ========== Auxiliary Functions ==========
# =========================================
def add_uniform_noise(data_sequence, p = 1):
"""
Function to add uniform random noise to a given dataset.
Uniform noise in range [-5*10^(-p), 5*10^(-p)] is added to each
data entry. For integer-valued sequences, p = 1.
Args:
data_sequence: numpy array of data to be processed.
p: integer parameter controlling noise amplitude.
Returns:
numpy array with noise-added entries.
"""
if p < 1:
print("Parameter p should be greater or equal to 1.")
return None
noise = np.random.uniform(-5.*10**(-p), 5*10**(-p), size = len(data_sequence))
randomized_data_sequence = data_sequence + noise
# ensure there are no negative entries after noise is added
randomized_data_sequence = randomized_data_sequence[np.where(randomized_data_sequence > 0)]
return randomized_data_sequence
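# Example: for an integer degree sequence such as np.array([2, 3, 3, 7]) with p = 1,
# every entry is perturbed by at most 0.5, which breaks ties between equal degrees
# while keeping distinct values in the same relative order.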
def get_distribution(data_sequence, number_of_bins = 30):
"""
Function to get a log-binned distribution of a given dataset.
Args:
data_sequence: numpy array with data to calculate
log-binned PDF on.
number_of_bins: number of logarithmic bins to use.
Returns:
x, y: numpy arrays containing midpoints of bins
and corresponding PDF values.
"""
# define the support of the distribution
lower_bound = min(data_sequence)
upper_bound = max(data_sequence)
# define bin edges
log = np.log10
lower_bound = log(lower_bound) if lower_bound > 0 else -1
upper_bound = log(upper_bound)
bins = np.logspace(lower_bound, upper_bound, number_of_bins)
# compute the histogram using numpy
y, __ = np.histogram(data_sequence, bins = bins, density = True)
# for each bin, compute its midpoint
x = bins[1:] - np.diff(bins) / 2.0
# if bin is empty, drop it from the resulting list
drop_indices = [i for i,k in enumerate(y) if k == 0.0]
x = [k for i,k in enumerate(x) if i not in drop_indices]
y = [k for i,k in enumerate(y) if i not in drop_indices]
return x, y
def get_ccdf(degree_sequence):
"""
Function to get CCDF of the list of degrees.
Args:
degree_sequence: numpy array of nodes' degrees.
Returns:
uniques: unique degree values met in the sequence.
1-CDF: CCDF values corresponding to the unique values
from the 'uniques' array.
"""
uniques, counts = np.unique(degree_sequence, return_counts=True)
cumprob = np.cumsum(counts).astype(np.double) / (degree_sequence.size)
return uniques[::-1], (1. - cumprob)[::-1]
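# Worked example: for degree_sequence = np.array([1, 1, 2, 3]),
# uniques = [1, 2, 3], counts = [2, 1, 1] and cumprob = [0.5, 0.75, 1.0],
# so the function returns ([3, 2, 1], [0.0, 0.25, 0.5]).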
# ================================================
# ========== Hill Tail Index Estimation ==========
# ================================================
def get_moments_estimates_1(ordered_data):
"""
Function to calculate first moments array given an ordered data
sequence. Decreasing ordering is required.
Args:
ordered_data: numpy array of ordered data for which
the 1st moment (Hill estimator)
is calculated.
Returns:
M1: numpy array of 1st moments (Hill estimator)
corresponding to all possible order statistics
of the dataset.
"""
logs_1 = np.log(ordered_data)
logs_1_cumsum = np.cumsum(logs_1[:-1])
k_vector = np.arange(1, len(ordered_data))
M1 = (1./k_vector)*logs_1_cumsum - logs_1[1:]
return M1
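# In formula form, with the data in decreasing order X_(1) >= X_(2) >= ... >= X_(n):
#   M1[k-1] = (1/k) * sum_{i=1..k} log X_(i)  -  log X_(k+1),   k = 1, ..., n-1,
# i.e. the classical Hill estimator evaluated at every admissible order statistic.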
def get_moments_estimates_2(ordered_data):
"""
Function to calculate first and second moments arrays
given an ordered data sequence.
Decreasing ordering is required.
Args:
ordered_data: numpy array of ordered data for which
the 1st (Hill estimator) and 2nd moments
are calculated.
Returns:
M1: numpy array of 1st moments (Hill estimator)
corresponding to all possible order statistics
of the dataset.
M2: numpy array of 2nd moments corresponding to all
possible order statistics of the dataset.
"""
logs_1 = np.log(ordered_data)
logs_2 = (np.log(ordered_data))**2
logs_1_cumsum = np.cumsum(logs_1[:-1])
logs_2_cumsum = np.cumsum(logs_2[:-1])
k_vector = np.arange(1, len(ordered_data))
M1 = (1./k_vector)*logs_1_cumsum - logs_1[1:]
M2 = (1./k_vector)*logs_2_cumsum - (2.*logs_1[1:]/k_vector)*logs_1_cumsum\
+ logs_2[1:]
return M1, M2
def get_moments_estimates_3(ordered_data):
"""
Function to calculate first, second and third moments
arrays given an ordered data sequence.
Decreasing ordering is required.
Args:
ordered_data: numpy array of ordered data for which
the 1st (Hill estimator), 2nd and 3rd moments
are calculated.
Returns:
M1: numpy array of 1st moments (Hill estimator)
corresponding to all possible order statistics
of the dataset.
M2: numpy array of 2nd moments corresponding to all
possible order statistics of the dataset.
M3: numpy array of 3rd moments corresponding to all
possible order statistics of the dataset.
"""
logs_1 = np.log(ordered_data)
logs_2 = (np.log(ordered_data))**2
logs_3 = (np.log(ordered_data))**3
logs_1_cumsum = np.cumsum(logs_1[:-1])
logs_2_cumsum = np.cumsum(logs_2[:-1])
logs_3_cumsum = np.cumsum(logs_3[:-1])
k_vector = np.arange(1, len(ordered_data))
M1 = (1./k_vector)*logs_1_cumsum - logs_1[1:]
M2 = (1./k_vector)*logs_2_cumsum - (2.*logs_1[1:]/k_vector)*logs_1_cumsum\
+ logs_2[1:]
M3 = (1./k_vector)*logs_3_cumsum - (3.*logs_1[1:]/k_vector)*logs_2_cumsum\
+ (3.*logs_2[1:]/k_vector)*logs_1_cumsum - logs_3[1:]
# cleaning exceptional cases
clean_indices = np.where((M2 <= 0) | (M3 == 0) | (np.abs(1.-(M1**2)/M2) < 1e-10)\
|(np.abs(1.-(M1*M2)/M3) < 1e-10))
M1[clean_indices] = np.nan
M2[clean_indices] = np.nan
M3[clean_indices] = np.nan
return M1, M2, M3
def hill_dbs(ordered_data, t_bootstrap = 0.5,
r_bootstrap = 500, eps_stop = 1.0,
verbose = False, diagn_plots = False):
"""
Function to perform double-bootstrap procedure for
Hill estimator.
Args:
ordered_data: numpy array for which double-bootstrap
is performed. Decreasing ordering is required.
t_bootstrap: parameter controlling the size of the 2nd
bootstrap. Defined from n2 = n*(t_bootstrap).
r_bootstrap: number of bootstrap resamplings for the 1st and 2nd
bootstraps.
eps_stop: parameter controlling range of AMSE minimization.
Defined as the fraction of order statistics to consider
during the AMSE minimization step.
verbose: flag controlling bootstrap verbosity.
diagn_plots: flag to switch on/off generation of AMSE diagnostic
plots.
Returns:
k_star: number of order statistics optimal for estimation
according to the double-bootstrap procedure.
x1_arr: array of fractions of order statistics used for the
1st bootstrap sample.
n1_amse: array of AMSE values produced by the 1st bootstrap
sample.
k1_min: value of fraction of order statistics corresponding
to the minimum of AMSE for the 1st bootstrap sample.
max_index1: index of the 1st bootstrap sample's order statistics
array corresponding to the minimization boundary set
by eps_stop parameter.
x2_arr: array of fractions of order statistics used for the
2nd bootstrap sample.
n2_amse: array of AMSE values produced by the 2nd bootstrap
sample.
k2_min: value of fraction of order statistics corresponding
to the minimum of AMSE for the 2nd bootstrap sample.
max_index2: index of the 2nd bootstrap sample's order statistics
array corresponding to the minimization boundary set
by eps_stop parameter.
"""
if verbose:
print("Performing Hill double-bootstrap...")
n = len(ordered_data)
eps_bootstrap = 0.5*(1+np.log(int(t_bootstrap*n))/np.log(n))
n1 = int(n**eps_bootstrap)
samples_n1 = np.zeros(n1-1)
good_counts1 = np.zeros(n1-1)
k1 = None
k2 = None
min_index1 = 1
min_index2 = 1
    while k2 is None:
# first bootstrap with n1 sample size
for i in range(r_bootstrap):
sample = np.random.choice(ordered_data, n1, replace = True)
sample[::-1].sort()
M1, M2 = get_moments_estimates_2(sample)
current_amse1 = (M2 - 2.*(M1)**2)**2
samples_n1 += current_amse1
            # note: `x != np.nan` is always True; count only non-NaN AMSE values
            good_counts1[np.where(~np.isnan(current_amse1))] += 1
averaged_delta = samples_n1 / good_counts1
max_index1 = (np.abs(np.linspace(1./n1, 1.0, n1) - eps_stop)).argmin()
k1 = np.nanargmin(averaged_delta[min_index1:max_index1]) + 1 + min_index1 #take care of indexing
if diagn_plots:
n1_amse = averaged_delta
x1_arr = np.linspace(1./n1, 1.0, n1)
# second bootstrap with n2 sample size
n2 = int(n1*n1/float(n))
samples_n2 = np.zeros(n2-1)
good_counts2 = np.zeros(n2-1)
for i in range(r_bootstrap):
sample = np.random.choice(ordered_data, n2, replace = True)
sample[::-1].sort()
M1, M2 = get_moments_estimates_2(sample)
current_amse2 = (M2 - 2.*(M1**2))**2
samples_n2 += current_amse2
            good_counts2[np.where(~np.isnan(current_amse2))] += 1
max_index2 = (np.abs(np.linspace(1./n2, 1.0, n2) - eps_stop)).argmin()
averaged_delta = samples_n2 / good_counts2
max_index1 = (np.abs(np.linspace(1./n1, 1.0, n1) - eps_stop)).argmin()
k2 = np.nanargmin(averaged_delta[min_index2:max_index2]) + 1 + min_index2 #take care of indexing
if diagn_plots:
n2_amse = averaged_delta
x2_arr = np.linspace(1./n2, 1.0, n2)
if k2 > k1:
print("Warning (Hill): k2 > k1, AMSE false minimum suspected, resampling...")
# move left AMSE boundary to avoid numerical issues
min_index1 = min_index1 + int(0.005*n)
min_index2 = min_index2 + int(0.005*n)
k2 = None
'''
# this constant is provided in the Danielsson's paper
# use instead of rho below if needed
rho = (np.log(k1)/(2.*np.log(n1) - np.log(k1)))\
**(2.*(np.log(n1) - np.log(k1))/(np.log(n1)))
'''
# this constant is provided in Qi's paper
rho = (1. - (2*(np.log(k1) - np.log(n1))/(np.log(k1))))**(np.log(k1)/np.log(n1) - 1.)
k_star = (k1*k1/float(k2)) * rho
k_star = int(np.round(k_star))
# enforce k_star to pick 2nd value (rare cases of extreme cutoffs)
if k_star == 0:
k_star = 2
if int(k_star) >= len(ordered_data):
print("WARNING: estimated threshold k is larger than the size of data")
k_star = len(ordered_data)-1
if verbose:
print("--- Hill double-bootstrap information ---")
print("Size of the 1st bootstrap sample n1:", n1)
print("Size of the 2nd bootstrap sample n2:", n2)
print("Estimated k1:", k1)
print("Estimated k2:", k2)
print("Estimated constant rho:", rho)
print("Estimated optimal k:", k_star)
print("-----------------------------------------")
if not diagn_plots:
x1_arr, x2_arr, n1_amse, n2_amse = None, None, None, None
return k_star, x1_arr, n1_amse, k1/float(n1), max_index1, x2_arr, n2_amse, k2/float(n2), max_index2
def hill_estimator(ordered_data,
bootstrap = True, t_bootstrap = 0.5,
r_bootstrap = 500, verbose = False,
diagn_plots = False, eps_stop = 0.99):
"""
Function to calculate Hill estimator for a given dataset.
If bootstrap flag is True, double-bootstrap procedure
for estimation of the optimal number of order statistics is
performed.
Args:
ordered_data: numpy array for which tail index estimation
is performed. Decreasing ordering is required.
bootstrap: flag to switch on/off double-bootstrap procedure.
t_bootstrap: parameter controlling the size of the 2nd
bootstrap. Defined from n2 = n*(t_bootstrap).
r_bootstrap: number of bootstrap resamplings for the 1st and 2nd
bootstraps.
eps_stop: parameter controlling range of AMSE minimization.
Defined as the fraction of order statistics to consider
during the AMSE minimization step.
verbose: flag controlling bootstrap verbosity.
diagn_plots: flag to switch on/off generation of AMSE diagnostic
plots.
Returns:
results: list containing an array of order statistics,
an array of corresponding tail index estimates,
the optimal order statistic estimated by double-
bootstrap and the corresponding tail index,
an array of fractions of order statistics used for
the 1st bootstrap sample with an array of corresponding
AMSE values, value of fraction of order statistics
corresponding to the minimum of AMSE for the 1st bootstrap
sample, index of the 1st bootstrap sample's order statistics
array corresponding to the minimization boundary set
by eps_stop parameter; and the same characteristics for the
2nd bootstrap sample.
"""
k_arr = np.arange(1, len(ordered_data))
xi_arr = get_moments_estimates_1(ordered_data)
if bootstrap:
results = hill_dbs(ordered_data,
t_bootstrap = t_bootstrap,
r_bootstrap = r_bootstrap,
verbose = verbose,
diagn_plots = diagn_plots,
eps_stop = eps_stop)
k_star, x1_arr, n1_amse, k1, max_index1, x2_arr, n2_amse, k2, max_index2 = results
        while k_star is None:
print("Resampling...")
results = hill_dbs(ordered_data,
t_bootstrap = t_bootstrap,
r_bootstrap = r_bootstrap,
verbose = verbose,
diagn_plots = diagn_plots,
eps_stop = eps_stop)
k_star, x1_arr, n1_amse, k1, max_index1, x2_arr, n2_amse, k2, max_index2 = results
xi_star = xi_arr[k_star-1]
print("Adjusted Hill estimated gamma:", 1 + 1./xi_star)
print("**********")
else:
k_star, xi_star = None, None
x1_arr, n1_amse, k1, max_index1 = 4*[None]
x2_arr, n2_amse, k2, max_index2 = 4*[None]
results = [k_arr, xi_arr, k_star, xi_star, x1_arr, n1_amse, k1, max_index1,\
x2_arr, n2_amse, k2, max_index2]
return results
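# Usage sketch (hedged: synthetic Pareto-distributed sample; decreasing order is required):
#     data = np.sort(np.random.pareto(2.0, 10000) + 1.)[::-1]
#     results = hill_estimator(data, bootstrap=True, r_bootstrap=100)
#     k_star, xi_star = results[2], results[3]   # optimal k and its tail-index estimate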
def smooth_hill_estimator(ordered_data, r_smooth = 2):
"""
Function to calculate smooth Hill estimator for a
given ordered dataset.
Args:
ordered_data: numpy array for which tail index estimation
is performed. Decreasing ordering is required.
r_smooth: integer parameter controlling the width
of smoothing window. Typically small
value such as 2 or 3.
Returns:
k_arr: numpy array of order statistics based on the data provided.
xi_arr: numpy array of tail index estimates corresponding to
the order statistics array k_arr.
"""
n = len(ordered_data)
M1 = get_moments_estimates_1(ordered_data)
xi_arr = np.zeros(int(np.floor(float(n)/r_smooth)))
k_arr = np.arange(1, int(np.floor(float(n)/r_smooth))+1)
xi_arr[0] = M1[0]
bin_lengths = np.array([1.]+[float((r_smooth-1)*k) for k in k_arr[:-1]])
cum_sum = 0.0
for i in range(1, r_smooth*int(np.floor(float(n)/r_smooth))-1):
k = i
cum_sum += M1[k]
if (k+1) % (r_smooth) == 0:
xi_arr[int(k+1)//int(r_smooth)] = cum_sum
cum_sum -= M1[int(k+1)//int(r_smooth)]
xi_arr = xi_arr/bin_lengths
return k_arr, xi_arr
# ===================================================
# ========== Moments Tail Index Estimation ==========
# ===================================================
def moments_dbs_prefactor(xi_n, n1, k1):
"""
Function to calculate pre-factor used in moments
double-bootstrap procedure.
Args:
xi_n: moments tail index estimate corresponding to
sqrt(n)-th order statistic.
n1: size of the 1st bootstrap in double-bootstrap
procedure.
k1: estimated optimal order statistic based on the 1st
bootstrap sample.
Returns:
prefactor: constant used in estimation of the optimal
stopping order statistic for moments estimator.
"""
def V_sq(xi_n):
if xi_n >= 0:
V = 1. + (xi_n)**2
return V
else:
a = (1.-xi_n)**2
b = (1-2*xi_n)*(6*((xi_n)**2)-xi_n+1)
c = (1.-3*xi_n)*(1-4*xi_n)
V = a*b/c
return V
def V_bar_sq(xi_n):
if xi_n >= 0:
V = 0.25*(1+(xi_n)**2)
return V
else:
a = 0.25*((1-xi_n)**2)
b = 1-8*xi_n+48*(xi_n**2)-154*(xi_n**3)
c = 263*(xi_n**4)-222*(xi_n**5)+72*(xi_n**6)
d = (1.-2*xi_n)*(1-3*xi_n)*(1-4*xi_n)
e = (1.-5*xi_n)*(1-6*xi_n)
V = a*(b+c)/(d*e)
return V
def b(xi_n, rho):
if xi_n < rho:
a1 = (1.-xi_n)*(1-2*xi_n)
a2 = (1.-rho-xi_n)*(1.-rho-2*xi_n)
return a1/a2
elif xi_n >= rho and xi_n < 0:
return 1./(1-xi_n)
else:
b = (xi_n/(rho*(1.-rho))) + (1./((1-rho)**2))
return b
def b_bar(xi_n, rho):
if xi_n < rho:
a1 = 0.5*(-rho*(1-xi_n)**2)
a2 = (1.-xi_n-rho)*(1-2*xi_n-rho)*(1-3*xi_n-rho)
return a1/a2
elif xi_n >= rho and xi_n < 0:
a1 = 1-2*xi_n-np.sqrt((1-xi_n)*(1-2.*xi_n))
a2 = (1.-xi_n)*(1-2*xi_n)
return a1/a2
else:
b = (-1.)*((rho + xi_n*(1-rho))/(2*(1-rho)**3))
return b
rho = np.log(k1)/(2*np.log(k1) - 2.*np.log(n1))
a = (V_sq(xi_n)) * (b_bar(xi_n, rho)**2)
b = V_bar_sq(xi_n) * (b(xi_n, rho)**2)
prefactor = (a/b)**(1./(1. - 2*rho))
return prefactor
def moments_dbs(ordered_data, xi_n, t_bootstrap = 0.5,
r_bootstrap = 500, eps_stop = 1.0,
verbose = False, diagn_plots = False):
"""
Function to perform double-bootstrap procedure for
moments estimator.
Args:
ordered_data: numpy array for which double-bootstrap
is performed. Decreasing ordering is required.
xi_n: moments tail index estimate corresponding to
sqrt(n)-th order statistic.
t_bootstrap: parameter controlling the size of the 2nd
bootstrap. Defined from n2 = n*(t_bootstrap).
r_bootstrap: number of bootstrap resamplings for the 1st and 2nd
bootstraps.
eps_stop: parameter controlling range of AMSE minimization.
Defined as the fraction of order statistics to consider
during the AMSE minimization step.
verbose: flag controlling bootstrap verbosity.
diagn_plots: flag to switch on/off generation of AMSE diagnostic
plots.
Returns:
k_star: number of order statistics optimal for estimation
according to the double-bootstrap procedure.
x1_arr: array of fractions of order statistics used for the
1st bootstrap sample.
n1_amse: array of AMSE values produced by the 1st bootstrap
sample.
k1_min: value of fraction of order statistics corresponding
to the minimum of AMSE for the 1st bootstrap sample.
max_index1: index of the 1st bootstrap sample's order statistics
array corresponding to the minimization boundary set
by eps_stop parameter.
x2_arr: array of fractions of order statistics used for the
2nd bootstrap sample.
n2_amse: array of AMSE values produced by the 2nd bootstrap
sample.
k2_min: value of fraction of order statistics corresponding
to the minimum of AMSE for the 2nd bootstrap sample.
max_index2: index of the 2nd bootstrap sample's order statistics
array corresponding to the minimization boundary set
by eps_stop parameter.
"""
if verbose:
print("Performing moments double-bootstrap...")
n = len(ordered_data)
eps_bootstrap = 0.5*(1+np.log(int(t_bootstrap*n))/np.log(n))
# first bootstrap with n1 sample size
n1 = int(n**eps_bootstrap)
samples_n1 = np.zeros(n1-1)
good_counts1 = np.zeros(n1-1)
for i in range(r_bootstrap):
sample = np.random.choice(ordered_data, n1, replace = True)
sample[::-1].sort()
M1, M2, M3 = get_moments_estimates_3(sample)
xi_2 = M1 + 1. - 0.5*((1. - (M1*M1)/M2))**(-1.)
xi_3 = np.sqrt(0.5*M2) + 1. - (2./3.)*(1. / (1. - M1*M2/M3))
samples_n1 += (xi_2 - xi_3)**2
        # note: `x != np.nan` is always True; count only non-NaN values
        good_counts1[np.where(~np.isnan((xi_2 - xi_3)**2))] += 1
max_index1 = (np.abs(np.linspace(1./n1, 1.0, n1) - eps_stop)).argmin()
averaged_delta = samples_n1 / good_counts1
k1 = np.nanargmin(averaged_delta[:max_index1]) + 1 #take care of indexing
if diagn_plots:
n1_amse = averaged_delta
x1_arr = np.linspace(1./n1, 1.0, n1)
    # second bootstrap with n2 sample size
n2 = int(n1*n1/float(n))
samples_n2 = np.zeros(n2-1)
good_counts2 = np.zeros(n2-1)
for i in range(r_bootstrap):
sample = np.random.choice(ordered_data, n2, replace = True)
sample[::-1].sort()
M1, M2, M3 = get_moments_estimates_3(sample)
xi_2 = M1 + 1. - 0.5*(1. - (M1*M1)/M2)**(-1.)
xi_3 = np.sqrt(0.5*M2) + 1. - (2./3.)*(1. / (1. - M1*M2/M3))
samples_n2 += (xi_2 - xi_3)**2
        good_counts2[np.where(~np.isnan((xi_2 - xi_3)**2))] += 1
max_index2 = (np.abs(np.linspace(1./n2, 1.0, n2) - eps_stop)).argmin()
averaged_delta = samples_n2 / good_counts2
k2 = np.nanargmin(averaged_delta[:max_index2]) + 1 #take care of indexing
if diagn_plots:
n2_amse = averaged_delta
x2_arr = np.linspace(1./n2, 1.0, n2)
if k2 > k1:
print("WARNING(moments): estimated k2 is greater than k1! Re-doing bootstrap...")
return 9*[None]
#calculate estimated optimal stopping k
prefactor = moments_dbs_prefactor(xi_n, n1, k1)
k_star = int((k1*k1/float(k2)) * prefactor)
if int(k_star) >= len(ordered_data):
print("WARNING: estimated threshold k is larger than the size of data")
k_star = len(ordered_data)-1
if verbose:
print("--- Moments double-bootstrap information ---")
print("Size of the 1st bootstrap sample n1:", n1)
print("Size of the 2nd bootstrap sample n2:", n2)
print("Estimated k1:", k1)
print("Estimated k2:", k2)
print("Estimated constant:", prefactor)
print("Estimated optimal k:", k_star)
print("--------------------------------------------")
if not diagn_plots:
x1_arr, x2_arr, n1_amse, n2_amse = None, None, None, None
return k_star, x1_arr, n1_amse, k1/float(n1), max_index1, x2_arr, n2_amse, k2/float(n2), max_index2
def moments_estimator(ordered_data,
bootstrap = True, t_bootstrap = 0.5,
r_bootstrap = 500, verbose = False,
diagn_plots = False, eps_stop = 0.99):
"""
Function to calculate moments estimator for a given dataset.
If bootstrap flag is True, double-bootstrap procedure
for estimation of the optimal number of order statistics is
performed.
Args:
ordered_data: numpy array for which tail index estimation
is performed. Decreasing ordering is required.
bootstrap: flag to switch on/off double-bootstrap procedure.
t_bootstrap: parameter controlling the size of the 2nd
bootstrap. Defined from n2 = n*(t_bootstrap).
r_bootstrap: number of bootstrap resamplings for the 1st and 2nd
bootstraps.
eps_stop: parameter controlling range of AMSE minimization.
Defined as the fraction of order statistics to consider
during the AMSE minimization step.
verbose: flag controlling bootstrap verbosity.
diagn_plots: flag to switch on/off generation of AMSE diagnostic
plots.
Returns:
results: list containing an array of order statistics,
an array of corresponding tail index estimates,
the optimal order statistic estimated by double-
bootstrap and the corresponding tail index,
an array of fractions of order statistics used for
the 1st bootstrap sample with an array of corresponding
AMSE values, value of fraction of order statistics
corresponding to the minimum of AMSE for the 1st bootstrap
sample, index of the 1st bootstrap sample's order statistics
array corresponding to the minimization boundary set
by eps_stop parameter; and the same characteristics for the
2nd bootstrap sample.
"""
n = len(ordered_data)
M1, M2 = get_moments_estimates_2(ordered_data)
xi_arr = M1 + 1. - 0.5*(1. - (M1*M1)/M2)**(-1)
k_arr = np.arange(1, len(ordered_data))
if bootstrap:
xi_n = xi_arr[int(np.floor(n**0.5))-1]
results = moments_dbs(ordered_data, xi_n,
t_bootstrap = t_bootstrap,
r_bootstrap = r_bootstrap,
verbose = verbose,
diagn_plots = diagn_plots,
eps_stop = eps_stop)
        while results[0] is None:
print("Resampling...")
results = moments_dbs(ordered_data, xi_n,
t_bootstrap = t_bootstrap,
r_bootstrap = r_bootstrap,
verbose = verbose,
diagn_plots = diagn_plots,
eps_stop = eps_stop)
k_star, x1_arr, n1_amse, k1, max_index1, x2_arr, n2_amse, k2, max_index2 = results
xi_star = xi_arr[k_star-1]
if xi_star <= 0:
print ("Moments estimated gamma: infinity (xi <= 0).")
else:
print ("Moments estimated gamma:", 1 + 1./xi_star)
print("**********")
else:
k_star, xi_star = None, None
x1_arr, n1_amse, k1, max_index1 = 4*[None]
x2_arr, n2_amse, k2, max_index2 = 4*[None]
results = [k_arr, xi_arr, k_star, xi_star, x1_arr, n1_amse, k1, max_index1,\
x2_arr, n2_amse, k2, max_index2]
return results
# =======================================================
# ========== Kernel-type Tail Index Estimation ==========
# =======================================================
def get_biweight_kernel_estimates(ordered_data, hsteps, alpha):
"""
Function to calculate biweight kernel-type estimates for tail index.
Biweight kernel is defined as:
phi(u) = (15/8) * (1 - u^2)^2
Args:
ordered_data: numpy array for which tail index estimation
is performed. Decreasing ordering is required.
hsteps: parameter controlling number of bandwidth steps
of the kernel-type estimator.
alpha: parameter controlling the amount of "smoothing"
for the kernel-type estimator. Should be greater
than 0.5.
Returns:
h_arr: numpy array of fractions of order statistics included
in kernel-type tail index estimation.
xi_arr: numpy array with tail index estimated corresponding
to different fractions of order statistics included
listed in h_arr array.
"""
n = len(ordered_data)
logs = np.log(ordered_data)
differences = logs[:-1] - logs[1:]
i_arr = np.arange(1, n)/float(n)
i3_arr = i_arr**3
i5_arr = i_arr**5
i_alpha_arr = i_arr**alpha
i_alpha2_arr = i_arr**(2.+alpha)
i_alpha4_arr = i_arr**(4.+alpha)
t1 = np.cumsum(i_arr*differences)
t2 = np.cumsum(i3_arr*differences)
t3 = np.cumsum(i5_arr*differences)
t4 = np.cumsum(i_alpha_arr*differences)
t5 = np.cumsum(i_alpha2_arr*differences)
t6 = np.cumsum(i_alpha4_arr*differences)
h_arr = np.logspace(np.log10(1./n), np.log10(1.0), hsteps)
max_i_vector = (np.floor((n*h_arr))-2.).astype(int)
gamma_pos = (15./(8*h_arr))*t1[max_i_vector]\
- (15./(4*(h_arr**3)))*t2[max_i_vector]\
+ (15./(8*(h_arr**5)))*t3[max_i_vector]
q1 = (15./(8*h_arr))*t4[max_i_vector]\
+ (15./(8*(h_arr**5)))*t6[max_i_vector]\
- (15./(4*(h_arr**3)))*t5[max_i_vector]
q2 = (15.*(1+alpha)/(8*h_arr))*t4[max_i_vector]\
+ (15.*(5+alpha)/(8*(h_arr**5)))*t6[max_i_vector]\
- (15.*(3+alpha)/(4*(h_arr**3)))*t5[max_i_vector]
xi_arr = gamma_pos -1. + q2/q1
return h_arr, xi_arr
def get_triweight_kernel_estimates(ordered_data, hsteps, alpha):
"""
Function to calculate triweight kernel-type estimates for tail index.
Triweight kernel is defined as:
phi(u) = (35/16) * (1 - u^2)^3
Args:
ordered_data: numpy array for which tail index estimation
is performed. Decreasing ordering is required.
hsteps: parameter controlling number of bandwidth steps
of the kernel-type estimator.
alpha: parameter controlling the amount of "smoothing"
for the kernel-type estimator. Should be greater
than 0.5.
Returns:
h_arr: numpy array of fractions of order statistics included
in kernel-type tail index estimation.
xi_arr: numpy array with tail index estimated corresponding
to different fractions of order statistics included
listed in h_arr array.
"""
n = len(ordered_data)
logs = np.log(ordered_data)
differences = logs[:-1] - logs[1:]
i_arr = np.arange(1, n)/float(n)
i3_arr = i_arr**3
i5_arr = i_arr**5
i7_arr = i_arr**7
i_alpha_arr = i_arr**alpha
i_alpha2_arr = i_arr**(2.+alpha)
i_alpha4_arr = i_arr**(4.+alpha)
i_alpha6_arr = i_arr**(6.+alpha)
t1 = np.cumsum(i_arr*differences)
t2 = np.cumsum(i3_arr*differences)
t3 = np.cumsum(i5_arr*differences)
t4 = np.cumsum(i7_arr*differences)
t5 = np.cumsum(i_alpha_arr*differences)
t6 = np.cumsum(i_alpha2_arr*differences)
t7 = np.cumsum(i_alpha4_arr*differences)
t8 = np.cumsum(i_alpha6_arr*differences)
h_arr = np.logspace(np.log10(1./n), np.log10(1.0), hsteps)
max_i_vector = (np.floor((n*h_arr))-2.).astype(int)
gamma_pos = (35./(16*h_arr))*t1[max_i_vector]\
- (105./(16*(h_arr**3)))*t2[max_i_vector]\
+ (105./(16*(h_arr**5)))*t3[max_i_vector]\
- (35./(16*(h_arr**7)))*t4[max_i_vector]
q1 = (35./(16*h_arr))*t5[max_i_vector]\
+ (105./(16*(h_arr**5)))*t7[max_i_vector]\
- (105./(16*(h_arr**3)))*t6[max_i_vector]\
- (35./(16*(h_arr**7)))*t8[max_i_vector]
q2 = (35.*(1+alpha)/(16*h_arr))*t5[max_i_vector] \
+ (105.*(5+alpha)/(16*(h_arr**5)))*t7[max_i_vector] \
- (105.*(3+alpha)/(16*(h_arr**3)))*t6[max_i_vector] \
- (35.*(7+alpha)/(16*(h_arr**7)))*t8[max_i_vector]
xi_arr = gamma_pos - 1. + q2/q1
return h_arr, xi_arr
def kernel_type_dbs(ordered_data, hsteps, t_bootstrap = 0.5,
r_bootstrap = 500, alpha = 0.6, eps_stop = 1.0,
verbose = False, diagn_plots = False):
"""
    Function to perform double-bootstrap procedure for
    kernel-type estimator.
Args:
ordered_data: numpy array for which double-bootstrap
is performed. Decreasing ordering is required.
hsteps: parameter controlling number of bandwidth steps
of the kernel-type estimator.
t_bootstrap: parameter controlling the size of the 2nd
bootstrap. Defined from n2 = n*(t_bootstrap).
r_bootstrap: number of bootstrap resamplings for the 1st and 2nd
bootstraps.
alpha: parameter controlling the amount of "smoothing"
for the kernel-type estimator. Should be greater
than 0.5.
eps_stop: parameter controlling range of AMSE minimization.
Defined as the fraction of order statistics to consider
during the AMSE minimization step.
verbose: flag controlling bootstrap verbosity.
diagn_plots: flag to switch on/off generation of AMSE diagnostic
plots.
Returns:
h_star: fraction of order statistics optimal for estimation
according to the double-bootstrap procedure.
x1_arr: array of fractions of order statistics used for the
1st bootstrap sample.
n1_amse: array of AMSE values produced by the 1st bootstrap
sample.
h1: value of fraction of order statistics corresponding
to the minimum of AMSE for the 1st bootstrap sample.
max_k_index1: index of the 1st bootstrap sample's order statistics
array corresponding to the minimization boundary set
by eps_stop parameter.
x2_arr: array of fractions of order statistics used for the
2nd bootstrap sample.
n2_amse: array of AMSE values produced by the 2nd bootstrap
sample.
h2: value of fraction of order statistics corresponding
to the minimum of AMSE for the 2nd bootstrap sample.
max_k_index2: index of the 2nd bootstrap sample's order statistics
array corresponding to the minimization boundary set
by eps_stop parameter.
"""
if verbose:
print("Performing kernel double-bootstrap...")
n = len(ordered_data)
eps_bootstrap = 0.5*(1+np.log(int(t_bootstrap*n))/np.log(n))
# first bootstrap with n1 sample size
n1 = int(n**eps_bootstrap)
samples_n1 = np.zeros(hsteps)
good_counts1 = np.zeros(hsteps)
for i in range(r_bootstrap):
sample = np.random.choice(ordered_data, n1, replace = True)
sample[::-1].sort()
_, xi2_arr = get_biweight_kernel_estimates(sample, hsteps, alpha)
_, xi3_arr = get_triweight_kernel_estimates(sample, hsteps, alpha)
samples_n1 += (xi2_arr - xi3_arr)**2
        good_counts1[np.where(~np.isnan((xi2_arr - xi3_arr)**2))] += 1
max_index1 = (np.abs(np.logspace(np.log10(1./n1), np.log10(1.0), hsteps) - eps_stop)).argmin()
x1_arr = np.logspace(np.log10(1./n1), np.log10(1.0), hsteps)
averaged_delta = samples_n1 / good_counts1
h1 = x1_arr[np.nanargmin(averaged_delta[:max_index1])]
if diagn_plots:
n1_amse = averaged_delta
# second bootstrap with n2 sample size
n2 = int(n1*n1/float(n))
if n2 < hsteps:
sys.exit("Number of h points is larger than number "+\
"of order statistics! Please either increase "+\
"the size of 2nd bootstrap or decrease number "+\
"of h grid points.")
samples_n2 = np.zeros(hsteps)
good_counts2 = np.zeros(hsteps)
for i in range(r_bootstrap):
sample = np.random.choice(ordered_data, n2, replace = True)
sample[::-1].sort()
_, xi2_arr = get_biweight_kernel_estimates(sample, hsteps, alpha)
_, xi3_arr = get_triweight_kernel_estimates(sample, hsteps, alpha)
samples_n2 += (xi2_arr - xi3_arr)**2
        good_counts2[np.where(~np.isnan((xi2_arr - xi3_arr)**2))] += 1
max_index2 = (np.abs(np.logspace(np.log10(1./n2), np.log10(1.0), hsteps) - eps_stop)).argmin()
x2_arr = np.logspace(np.log10(1./n2), np.log10(1.0), hsteps)
averaged_delta = samples_n2 / good_counts2
h2 = x2_arr[np.nanargmin(averaged_delta[:max_index2])]
if diagn_plots:
n2_amse = averaged_delta
A = (143.*((np.log(n1) + np.log(h1))**2)/(3*(np.log(n1) - 13. * np.log(h1))**2))\
**(-np.log(h1)/np.log(n1))
h_star = (h1*h1/float(h2)) * A
if h_star > 1:
print("WARNING: estimated threshold is larger than the size of data!")
print("WARNING: optimal h is set to 1...")
h_star = 1.
if verbose:
print("--- Kernel-type double-bootstrap information ---")
print("Size of the 1st bootstrap sample n1:", n1)
print("Size of the 2nd bootstrap sample n2:", n2)
print("Estimated h1:", h1)
print("Estimated h2:", h2)
print("Estimated constant A:", A)
print("Estimated optimal h:", h_star)
print("------------------------------------------------")
if not diagn_plots:
x1_arr, x2_arr, n1_amse, n2_amse = None, None, None, None
if x1_arr is not None:
max_k_index1 = x1_arr[max_index1]
else:
max_k_index1 = None
if x2_arr is not None:
max_k_index2 = x2_arr[max_index2]
else:
max_k_index2 = None
return h_star, x1_arr, n1_amse, h1, max_k_index1, x2_arr, n2_amse, h2, max_k_index2
def kernel_type_estimator(ordered_data, hsteps, alpha = 0.6,
bootstrap = True, t_bootstrap = 0.5,
r_bootstrap = 500, verbose = False,
diagn_plots = False, eps_stop = 0.99):
"""
Function to calculate kernel-type estimator for a given dataset.
If bootstrap flag is True, double-bootstrap procedure
for estimation of the optimal number of order statistics is
performed.
Args:
ordered_data: numpy array for which tail index estimation
is performed. Decreasing ordering is required.
hsteps: parameter controlling number of bandwidth steps
of the kernel-type estimator.
alpha: parameter controlling the amount of "smoothing"
for the kernel-type estimator. Should be greater
than 0.5.
bootstrap: flag to switch on/off double-bootstrap procedure.
t_bootstrap: parameter controlling the size of the 2nd
bootstrap. Defined from n2 = n*(t_bootstrap).
r_bootstrap: number of bootstrap resamplings for the 1st and 2nd
bootstraps.
eps_stop: parameter controlling range of AMSE minimization.
Defined as the fraction of order statistics to consider
during the AMSE minimization step.
verbose: flag controlling bootstrap verbosity.
diagn_plots: flag to switch on/off generation of AMSE diagnostic
plots.
Returns:
results: list containing an array of fractions of order statistics,
an array of corresponding tail index estimates,
the optimal order statistic estimated by double-
bootstrap and the corresponding tail index,
an array of fractions of order statistics used for
the 1st bootstrap sample with an array of corresponding
AMSE values, value of fraction of order statistics
corresponding to the minimum of AMSE for the 1st bootstrap
sample, index of the 1st bootstrap sample's order statistics
array corresponding to the minimization boundary set
by eps_stop parameter; and the same characteristics for the
2nd bootstrap sample.
"""
n = len(ordered_data)
h_arr, xi_arr = get_biweight_kernel_estimates(ordered_data, hsteps,
alpha = alpha)
if bootstrap:
results = kernel_type_dbs(ordered_data, hsteps,
t_bootstrap = t_bootstrap,
alpha = alpha, r_bootstrap = r_bootstrap,
verbose = verbose, diagn_plots = diagn_plots,
eps_stop = eps_stop)
h_star, x1_arr, n1_amse, h1, max_index1, x2_arr, n2_amse, h2, max_index2 = results
        while h_star is None:
print("Resampling...")
results = kernel_type_dbs(ordered_data, hsteps,
t_bootstrap = t_bootstrap,
alpha = alpha, r_bootstrap = r_bootstrap,
verbose = verbose, diagn_plots = diagn_plots,
eps_stop = eps_stop)
h_star, x1_arr, n1_amse, h1, max_index1, x2_arr, n2_amse, h2, max_index2 = results
#get k index which corresponds to h_star
k_star = np.argmin(np.abs(h_arr - h_star))
xi_star = xi_arr[k_star]
        k_star = int(np.floor(h_arr[k_star]*n))-1
        k_arr = np.floor(h_arr * n)
if xi_star <= 0:
print ("Kernel-type estimated gamma: infinity (xi <= 0).")
else:
print ("Kernel-type estimated gamma:", 1 + 1./xi_star)
print("**********")
else:
k_star, xi_star = None, None
x1_arr, n1_amse, h1, max_index1 = 4*[None]
x2_arr, n2_amse, h2, max_index2 = 4*[None]
k_arr = np.floor(h_arr * n)
results = [np.array(k_arr), xi_arr, k_star, xi_star, x1_arr, n1_amse, h1, max_index1,\
x2_arr, n2_amse, h2, max_index2]
return results
# ====================================================
# ========== Pickands Tail Index Estimation ==========
# ====================================================
def pickands_estimator(ordered_data):
"""
Function to calculate Pickands estimator for the tail index.
Args:
ordered_data: numpy array for which tail index estimation
is performed. Decreasing ordering is required.
Returns:
k_arr: array containing order statistics used for
Pickands estimator calculation. Note that only estimates
up to floor(n/4)-th order statistic can be calculated.
xi_arr: array containing tail index estimates corresponding
to k-order statistics provided in k_arr.
"""
n = len(ordered_data)
indices_k = np.arange(1, int(np.floor(n/4.))+1)
indices_2k = 2*indices_k
indices_4k = 4*indices_k
Z_k = ordered_data[indices_k-1]
Z_2k = ordered_data[indices_2k-1]
Z_4k = ordered_data[indices_4k-1]
xi_arr = (1./np.log(2)) * np.log((Z_k - Z_2k) / (Z_2k - Z_4k))
k_arr = np.array([float(i) for i in range(1, int(np.floor(n/4.))+1)])
return k_arr, xi_arr
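# In formula form, with X_(1) >= X_(2) >= ... (decreasing order):
#   xi(k) = (1/log 2) * log( (X_(k) - X_(2k)) / (X_(2k) - X_(4k)) ),  k = 1, ..., floor(n/4).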
# ==================================================
# ========== Plotting and Data Processing ==========
# ==================================================
def make_plots(ordered_data, output_file_path, number_of_bins,
r_smooth, alpha, hsteps, bootstrap_flag, t_bootstrap,
r_bootstrap, diagn_plots, eps_stop, theta1, theta2,
verbose, noise_flag, p_noise, savedata):
"""
Function to create plots and save tail index estimation data.
Args:
ordered_data: numpy array for which tail index estimation
is performed. Decreasing ordering is required.
output_file_path: file path to which plots should be saved.
number_of_bins: number of log-bins for degree distribution.
r_smooth: integer parameter controlling the width
of smoothing window. Typically small
value such as 2 or 3.
alpha: parameter controlling the amount of "smoothing"
for the kernel-type estimator. Should be greater
than 0.5.
hsteps: parameter controlling number of bandwidth steps
of the kernel-type estimator.
bootstrap_flag: flag to switch on/off double-bootstrap procedure.
t_bootstrap: parameter controlling the size of the 2nd
bootstrap. Defined from n2 = n*(t_bootstrap).
r_bootstrap: number of bootstrap resamplings for the 1st and 2nd
bootstraps.
diagn_plots: flag to switch on/off generation of AMSE diagnostic
plots.
eps_stop: parameter controlling range of AMSE minimization.
Defined as the fraction of order statistics to
consider during the AMSE minimization step.
theta1: Lower bound of plotting range, defined as
k_min = ceil(n^theta1).
Overwritten if plots behave badly within the range.
theta2: Upper bound of plotting range, defined as
k_max = floor(n^theta2).
Overwritten if plots behave badly within the range.
verbose: flag controlling bootstrap verbosity.
noise_flag: Switch on/off uniform noise in range
[-5*10^(-p), 5*10^(-p)] that is added to each
data point. Used for integer-valued sequences
with p = 1 (default = 1).
p_noise: integer parameter controlling noise amplitude.
savedata: Flag to save data files in the directory with plots.
"""
output_dir = os.path.dirname(os.path.realpath(output_file_path))
output_name = os.path.splitext(os.path.basename(output_file_path))[0]
# calculate log-binned PDF
if verbose:
print("Calculating PDF...")
t1 =time.time()
x_pdf, y_pdf = get_distribution(ordered_data,
number_of_bins = number_of_bins)
t2 =time.time()
if verbose:
print("Elapsed time(PDF):", t2-t1)
if savedata == 1:
with open(os.path.join(output_dir+"/"+output_name+"_pdf.dat"), "w") as f:
for i in range(len(x_pdf)):
f.write(str(x_pdf[i]) + " " + str(y_pdf[i]) + "\n")
# calculate CCDF
if verbose:
print("Calculating CCDF...")
t1 = time.time()
x_ccdf, y_ccdf = get_ccdf(ordered_data)
t2 = time.time()
if verbose:
print("Elapsed time:", t2-t1)
if savedata == 1:
with open(os.path.join(output_dir+"/"+output_name+"_ccdf.dat"), "w") as f:
for i in range(len(x_ccdf)):
f.write(str(x_ccdf[i]) + " " + str(y_ccdf[i]) + "\n")
# add noise if needed
if noise_flag:
original_discrete_data = ordered_data
discrete_ordered_data = ordered_data
discrete_ordered_data[::-1].sort()
ordered_data = add_uniform_noise(ordered_data, p = p_noise)
ordered_data[::-1].sort()
# perform Pickands estimation
if verbose:
print("Calculating Pickands...")
t1=time.time()
k_p_arr, xi_p_arr = pickands_estimator(ordered_data)
t2 =time.time()
if verbose:
print("Elapsed time (Pickands):", t2-t1)
if savedata == 1:
with open(os.path.join(output_dir+"/"+output_name+"_pickands.dat"), "w") as f:
for i in range(len(k_p_arr)):
f.write(str(k_p_arr[i]) + " " + str(xi_p_arr[i]) + "\n")
# perform smooth Hill estimation
if verbose:
print("Calculating smooth Hill...")
t1=time.time()
k_sh_arr, xi_sh_arr = smooth_hill_estimator(ordered_data,
r_smooth = r_smooth)
t2=time.time()
if verbose:
print("Elapsed time (smooth Hill):", t2-t1)
if savedata == 1:
with open(os.path.join(output_dir+"/"+output_name+"_sm_hill.dat"), "w") as f:
for i in range(len(k_sh_arr)):
f.write(str(k_sh_arr[i]) + " " + str(xi_sh_arr[i]) + "\n")
# perform adjusted Hill estimation
if verbose:
print("Calculating adjusted Hill...")
t1 = time.time()
hill_results = hill_estimator(ordered_data,
bootstrap = bootstrap_flag,
t_bootstrap = t_bootstrap,
r_bootstrap = r_bootstrap,
diagn_plots = diagn_plots,
eps_stop = eps_stop,
verbose = verbose)
t2 =time.time()
if verbose:
print("Elapsed time (Hill):", t2-t1)
k_h_arr = hill_results[0]
xi_h_arr = hill_results[1]
k_h_star = hill_results[2]
xi_h_star = hill_results[3]
x1_h_arr, n1_h_amse, k1_h, max_h_index1 = hill_results[4:8]
x2_h_arr, n2_h_amse, k2_h, max_h_index2 = hill_results[8:]
if savedata == 1:
with open(os.path.join(output_dir+"/"+output_name+"_adj_hill_plot.dat"), "w") as f:
for i in range(len(k_h_arr)):
f.write(str(k_h_arr[i]) + " " + str(xi_h_arr[i]) + "\n")
with open(os.path.join(output_dir+"/"+output_name+"_adj_hill_estimate.dat"), "w") as f:
f.write(str(k_h_star) + " " + str(xi_h_star) + "\n")
# perform moments estimation
if verbose:
print("Calculating moments...")
t1 = time.time()
moments_results = moments_estimator(ordered_data,
bootstrap = bootstrap_flag,
t_bootstrap = t_bootstrap,
r_bootstrap = r_bootstrap,
diagn_plots = diagn_plots,
eps_stop = eps_stop,
verbose = verbose)
t2 = time.time()
if verbose:
print("Elapsed time (moments):", t2-t1)
k_m_arr = moments_results[0]
xi_m_arr = moments_results[1]
k_m_star = moments_results[2]
xi_m_star = moments_results[3]
x1_m_arr, n1_m_amse, k1_m, max_m_index1 = moments_results[4:8]
x2_m_arr, n2_m_amse, k2_m, max_m_index2 = moments_results[8:]
if savedata == 1:
with open(os.path.join(output_dir+"/"+output_name+"_mom_plot.dat"), "w") as f:
for i in range(len(k_m_arr)):
f.write(str(k_m_arr[i]) + " " + str(xi_m_arr[i]) + "\n")
with open(os.path.join(output_dir+"/"+output_name+"_mom_estimate.dat"), "w") as f:
f.write(str(k_m_star) + " " + str(xi_m_star) + "\n")
# perform kernel-type estimation
if verbose:
print("Calculating kernel-type...")
t1 = time.time()
kernel_type_results = kernel_type_estimator(ordered_data, hsteps,
alpha = alpha,
bootstrap = bootstrap_flag,
t_bootstrap = t_bootstrap,
r_bootstrap = r_bootstrap,
diagn_plots = diagn_plots,
eps_stop = eps_stop,
verbose = verbose)
t2 = time.time()
if verbose:
print("Elapsed time (kernel-type):", t2-t1)
k_k_arr = kernel_type_results[0]
xi_k_arr = kernel_type_results[1]
k_k_star = kernel_type_results[2]
xi_k_star = kernel_type_results[3]
x1_k_arr, n1_k_amse, h1, max_k_index1 = kernel_type_results[4:8]
x2_k_arr, n2_k_amse, h2, max_k_index2 = kernel_type_results[8:]
if bootstrap_flag:
k_k1_star = np.argmin(np.abs(k_k_arr - k_k_star))
if savedata == 1:
with open(os.path.join(output_dir+"/"+output_name+"_kern_plot.dat"), "w") as f:
for i in range(len(k_k_arr)):
f.write(str(k_k_arr[i]) + " " + str(xi_k_arr[i]) + "\n")
with open(os.path.join(output_dir+"/"+output_name+"_kern_estimate.dat"), "w") as f:
f.write(str(k_k_arr[k_k1_star]) + " " + str(xi_k_arr[k_k1_star]) + "\n")
# plotting part
if verbose:
print("Making plots...")
fig, axes = plt.subplots(3, 2, figsize = (12, 16))
for ax in axes.reshape(-1):
ax.tick_params(direction='out', length=6, width=1.5,
labelsize = 12, which = 'major')
ax.tick_params(direction='out', length=3, width=1, which = 'minor')
[i.set_linewidth(1.5) for i in ax.spines.values()]
# plot PDF
axes[0,0].set_xlabel(r"Degree $k$", fontsize = 20)
axes[0,0].set_ylabel(r"$P(k)$", fontsize = 20)
axes[0,0].loglog(x_pdf, y_pdf, color = "#386cb0", marker = "s",
lw = 1.5, markeredgecolor = "black")
# plot CCDF
axes[0,1].set_xlabel(r"Degree $k$", fontsize = 20)
axes[0,1].set_ylabel(r"$\bar{F}(k)$", fontsize = 20)
axes[0,1].set_xscale("log")
axes[0,1].set_yscale("log")
axes[0,1].step(x_ccdf, y_ccdf, color = "#386cb0", lw = 1.5)
# draw scalings
if noise_flag:
xmin = discrete_ordered_data[k_h_star]
else:
xmin = ordered_data[k_h_star]
x = x_ccdf[np.where(x_ccdf >= xmin)]
l = np.mean(y_ccdf[np.where(x == xmin)])
alpha = 1./xi_h_star
if xi_h_star > 0:
axes[0,1].plot(x, [l*(float(xmin)/k)**alpha for k in x],
color = '#fb8072', ls = '--', lw = 2,
label = r"Adj. Hill Scaling $(\alpha="+\
str(np.round(1./xi_h_star, decimals = 3))+r")$")
axes[0,1].plot((x[-1]), [l*(float(xmin)/x[-1])**(alpha)],
color = "#fb8072", ls = 'none', marker = 'o',
markerfacecolor = 'none', markeredgecolor = "#fb8072",
markeredgewidth = 3, markersize = 10)
if noise_flag:
xmin = discrete_ordered_data[k_m_star]
else:
xmin = ordered_data[k_m_star]
x = x_ccdf[np.where(x_ccdf >= xmin)]
l = np.mean(y_ccdf[np.where(x == xmin)])
alpha = 1./xi_m_star
if xi_m_star > 0:
axes[0,1].plot(x, [l*(float(xmin)/k)**alpha for k in x],
color = '#8dd3c7', ls = '--', lw = 2,
label = r"Moments Scaling $(\alpha="+\
str(np.round(1./xi_m_star, decimals = 3))+r")$")
axes[0,1].plot((x[-1]), [l*(float(xmin)/x[-1])**(alpha)],
color = "#8dd3c7", ls = 'none', marker = 'o',
markerfacecolor = 'none', markeredgecolor = "#8dd3c7",
markeredgewidth = 3, markersize = 10)
if noise_flag:
xmin = discrete_ordered_data[k_k_star]
else:
xmin = ordered_data[k_k_star]
x = x_ccdf[np.where(x_ccdf >= xmin)]
l = np.mean(y_ccdf[np.where(x == xmin)])
alpha = 1./xi_k_star
if xi_k_star > 0:
axes[0,1].plot(x, [l*(float(xmin)/k)**alpha for k in x],
color = '#fdb462', ls = '--', lw = 2,
label = r"Kernel Scaling $(\alpha="+\
str(np.round(1./xi_k_star, decimals = 3))+r")$")
axes[0,1].plot((x[-1]), [l*(float(xmin)/x[-1])**(alpha)],
color = "#8dd3c7", ls = 'none', marker = 'o',
markerfacecolor = 'none', markeredgecolor = "#fdb462",
markeredgewidth = 3, markersize = 10)
axes[0,1].legend(loc = 'best')
# define min and max order statistics to plot
min_k = int(np.ceil(len(k_h_arr)**theta1)) - 1
max_k = int(np.floor(len(k_h_arr)**theta2)) - 1
# check if estimators' values are not too off in these bounds
min_k_index = (np.abs(k_sh_arr - min_k)).argmin()
max_k_index = (np.abs(k_sh_arr - max_k)).argmin()
if (xi_sh_arr[min_k_index] <= -3 or xi_sh_arr[min_k_index] >= 3):
indices_to_plot_sh = np.where((xi_sh_arr <= 3) & (xi_sh_arr >= -3))
elif (xi_sh_arr[max_k_index] <= -3 or xi_sh_arr[max_k_index] >= 3):
indices_to_plot_sh = np.where((xi_sh_arr <= 3) & (xi_sh_arr >= -3))
else:
indices_to_plot_sh = np.where((k_sh_arr <= max_k) & (k_sh_arr >= min_k))
axes[1,0].set_xlabel(r"Number of Order Statistics $\kappa$", fontsize = 20)
axes[1,0].set_ylabel(r"Estimated $\hat{\xi}$", fontsize = 20)
# plot smooth Hill
axes[1,0].plot(k_sh_arr[indices_to_plot_sh], xi_sh_arr[indices_to_plot_sh],
color = "#b3de69", alpha = 0.8, label = "Smooth Hill",
zorder = 10)
# plot adjusted Hill
# check if estimators' values are not too off in these bounds
if (xi_h_arr[min_k-1] <= -3 or xi_h_arr[min_k-1] >= 3):
indices_to_plot_h = np.where((xi_h_arr <= 3) & (xi_h_arr >= -3))
elif (xi_h_arr[max_k-1] <= -3 or xi_h_arr[max_k-1] >= 3):
indices_to_plot_h = np.where((xi_h_arr <= 3) & (xi_h_arr >= -3))
else:
indices_to_plot_h = np.where((k_h_arr <= max_k) & (k_h_arr >= min_k))
axes[1,0].plot(k_h_arr[indices_to_plot_h], xi_h_arr[indices_to_plot_h],
color = "#fb8072", alpha = 0.8, label = "Adjusted Hill",
zorder = 10)
if bootstrap_flag:
axes[1,0].scatter([k_h_arr[k_h_star-1]], [xi_h_arr[k_h_star-1]],
color = "#fb8072", marker = "*", s = 100,
edgecolor = "black", zorder = 20,
label = r"$\widehat{\xi}^{Hill}="\
+str(np.round([xi_h_arr[k_h_star-1]][0], decimals = 3))\
+r"$")
axes[1,0].legend(loc = "best")
axes[1,1].set_xlabel(r"Number of Order Statistics $\kappa$", fontsize = 20)
axes[1,1].set_ylabel(r"Estimated $\hat{\xi}$", fontsize = 20)
axes[1,1].set_xscale("log")
# plot smooth Hill
axes[1,1].plot(k_sh_arr[indices_to_plot_sh], xi_sh_arr[indices_to_plot_sh],
color = "#b3de69", alpha = 0.8, label = "Smooth Hill",
zorder = 10)
# plot adjusted Hill
indices_to_plot = np.where((k_h_arr <= max_k) & (k_h_arr >= min_k))
axes[1,1].plot(k_h_arr[indices_to_plot_h], xi_h_arr[indices_to_plot_h],
color = "#fb8072", alpha = 0.8, label = "Adjusted Hill",
zorder = 10)
if bootstrap_flag:
axes[1,1].scatter([k_h_arr[k_h_star-1]], [xi_h_arr[k_h_star-1]],
color = "#fb8072", marker = "*", s = 100,
edgecolor = "black", zorder = 20,
label = r"$\widehat{\xi}^{Hill}="\
+str(np.round([xi_h_arr[k_h_star-1]][0], decimals = 3))\
+r"$")
axes[1,1].legend(loc = "best")
axes[2,0].set_xlabel(r"Number of Order Statistics $\kappa$", fontsize = 20)
axes[2,0].set_ylabel(r"Estimated $\hat{\xi}$", fontsize = 20)
#plot Pickands
min_k_index = (np.abs(k_p_arr - min_k)).argmin()
max_k_index = (np.abs(k_p_arr - max_k)).argmin()
if (xi_p_arr[min_k_index] <= -3 or xi_p_arr[min_k_index] >= 3):
indices_to_plot_p = np.where((xi_p_arr <= 3) & (xi_p_arr >= -3))
elif (xi_p_arr[max_k_index] <= -3 or xi_p_arr[max_k_index] >= 3):
indices_to_plot_p = np.where((xi_p_arr <= 3) & (xi_p_arr >= -3))
else:
indices_to_plot_p = np.where((k_p_arr <= max_k) & (k_p_arr >= min_k))
axes[2,0].plot(k_p_arr[indices_to_plot_p], xi_p_arr[indices_to_plot_p],
color = "#bc80bd", alpha = 0.8, label = "Pickands",
zorder = 10)
#plot moments
if (xi_m_arr[min_k-1] <= -3 or xi_m_arr[min_k-1] >= 3):
indices_to_plot_m = np.where((xi_m_arr <= 3) & (xi_m_arr >= -3))
elif (xi_m_arr[max_k-1] <= -3 or xi_m_arr[max_k-1] >= 3):
indices_to_plot_m = np.where((xi_m_arr <= 3) & (xi_m_arr >= -3))
else:
indices_to_plot_m = np.where((k_m_arr <= max_k) & (k_m_arr >= min_k))
axes[2,0].plot(k_m_arr[indices_to_plot_m], xi_m_arr[indices_to_plot_m],
color = "#8dd3c7", alpha = 0.8, label = "Moments",
zorder = 10)
if bootstrap_flag:
axes[2,0].scatter([k_m_arr[k_m_star-1]], [xi_m_arr[k_m_star-1]],
color = "#8dd3c7", marker = "*", s = 100,
edgecolor = "black", zorder = 20,
label = r"$\widehat{\xi}^{Moments}="\
+str(np.round([xi_m_arr[k_m_star-1]][0], decimals = 3))\
+r"$")
#plot kernel-type
min_k_index = (np.abs(k_k_arr - min_k)).argmin()
max_k_index = (np.abs(k_k_arr - max_k)).argmin()
if (xi_k_arr[min_k_index] <= -3 or xi_k_arr[min_k_index] >= 3):
indices_to_plot_k = np.where((xi_k_arr <= 3) & (xi_k_arr >= -3))
elif (xi_k_arr[max_k_index] <= -3 or xi_k_arr[max_k_index] >= 3):
indices_to_plot_k = np.where((xi_k_arr <= 3) & (xi_k_arr >= -3))
else:
indices_to_plot_k = list(range(min_k_index, max_k_index))
#indices_to_plot_k = np.where((xi_k_arr <= 3) & (xi_k_arr >= -3))
axes[2,0].plot(k_k_arr[indices_to_plot_k], xi_k_arr[indices_to_plot_k],
color = "#fdb462", alpha = 0.8, label = "Kernel",
zorder = 10)
if bootstrap_flag:
axes[2,0].scatter([k_k_arr[k_k1_star-1]], [xi_k_arr[k_k1_star-1]],
color = "#fdb462", marker = "*", s = 100,
edgecolor = "black", zorder = 20,
label = r"$\widehat{\xi}^{Kernel}="\
+str(np.round([xi_k_arr[k_k1_star-1]][0], decimals = 3))\
+r"$")
axes[2,0].legend(loc = "best")
# for clarity purposes, display only the xi region between -0.5 and 1.5
axes[2,0].set_ylim((-0.5,1.5))
axes[2,1].set_xlabel(r"Number of Order Statistics $\kappa$", fontsize = 20)
axes[2,1].set_ylabel(r"Estimated $\hat{\xi}$", fontsize = 20)
axes[2,1].set_xscale("log")
#plot Pickands
axes[2,1].plot(k_p_arr[indices_to_plot_p], xi_p_arr[indices_to_plot_p],
color = "#bc80bd", alpha = 0.8, label = "Pickands",
zorder = 10)
#plot moments
axes[2,1].plot(k_m_arr[indices_to_plot_m], xi_m_arr[indices_to_plot_m],
color = "#8dd3c7", alpha = 0.8, label = "Moments",
zorder = 10)
if bootstrap_flag:
axes[2,1].scatter([k_m_arr[k_m_star-1]], [xi_m_arr[k_m_star-1]],
color = "#8dd3c7", marker = "*", s = 100,
edgecolor = "black", zorder = 20,
label = r"$\widehat{\xi}^{Moments}="\
+str(np.round([xi_m_arr[k_m_star-1]][0], decimals = 3))\
+r"$")
#plot kernel-type
axes[2,1].plot(k_k_arr[indices_to_plot_k], xi_k_arr[indices_to_plot_k],
color = "#fdb462", alpha = 0.8, label = "Kernel",
zorder = 10)
if bootstrap_flag:
axes[2,1].scatter([k_k_arr[k_k1_star-1]], [xi_k_arr[k_k1_star-1]],
color = "#fdb462", marker = "*", s = 100,
edgecolor = "black", zorder = 20,
label = r"$\widehat{\xi}^{Kernel}="\
+str(np.round([xi_k_arr[k_k1_star-1]][0], decimals = 3))\
+r"$")
# for clarity purposes, display only the xi region between -0.5 and 1.5
axes[2,1].set_ylim((-0.5,1.5))
axes[2,1].legend(loc = "best")
if diagn_plots:
fig_d, axes_d = plt.subplots(1, 3, figsize = (18, 6))
# filter out boundary values using theta parameters for Hill
min_k1 = 2
max_k1 = len(x1_h_arr) - 1
min_k2 = 2
max_k2 = len(x2_h_arr) - 1
axes_d[0].set_yscale("log")
axes_d[0].set_xscale("log")
axes_d[1].set_xscale("log")
axes_d[2].set_xscale("log")
n1_h_amse[np.where((n1_h_amse == np.inf) |\
(n1_h_amse == -np.inf))] = np.nan
axes_d[0].set_ylim((0.1*np.nanmin(n1_h_amse[min_k1:max_k1]), 1.0))
axes_d[0].set_xlabel("Fraction of Bootstrap Order Statistics",
fontsize = 20)
axes_d[0].set_ylabel(r"$\langle AMSE \rangle$", fontsize = 20)
axes_d[0].set_title("Adjusted Hill Estimator", fontsize = 20)
# plot AMSE and corresponding minimum
axes_d[0].plot(x1_h_arr[min_k1:max_k1], n1_h_amse[min_k1:max_k1],
alpha = 0.5, lw = 1.5,
color = "#d55e00", label = r"$n_1$ samples")
axes_d[0].scatter([k1_h], [n1_h_amse[int(len(x1_h_arr)*k1_h)-1]],
color = "#d55e00",
marker = 'o', edgecolor = "black", alpha = 0.5,
label = r"Min for $n_1$ sample")
axes_d[0].plot(x2_h_arr[min_k2:max_k2], n2_h_amse[min_k2:max_k2],
alpha = 0.5, lw = 1.5,
color = "#0072b2", label = r"$n_2$ samples")
axes_d[0].scatter([k2_h], [n2_h_amse[int(len(x2_h_arr)*k2_h)-1]],
color = "#0072b2",
marker = 'o', edgecolor = "black", alpha = 0.5,
label = r"Min for $n_2$ sample")
axes_d[0].axvline(max_h_index1/float(len(x1_h_arr)), color = "#d55e00",
ls = '--', alpha = 0.5,
label = r"Minimization boundary for $n_1$ sample")
axes_d[0].axvline(max_h_index2/float(len(x2_h_arr)), color = "#0072b2",
ls = '--', alpha = 0.5,
label = r"Minimization boundary for $n_2$ sample")
axes_d[0].legend(loc = "best")
if savedata == 1:
with open(os.path.join(output_dir+"/"+output_name+"_adjhill_diagn1.dat"), "w") as f:
for i in range(len(x1_h_arr[min_k1:max_k1])):
f.write(str(x1_h_arr[min_k1:max_k1][i]) + " " + str(n1_h_amse[min_k1:max_k1][i]) + "\n")
with open(os.path.join(output_dir+"/"+output_name+"_adjhill_diagn2.dat"), "w") as f:
for i in range(len(x2_h_arr[min_k2:max_k2])):
f.write(str(x2_h_arr[min_k2:max_k2][i]) + " " + str(n2_h_amse[min_k2:max_k2][i]) + "\n")
with open(os.path.join(output_dir+"/"+output_name+"_adjhill_diagn_points.dat"), "w") as f:
f.write("Min for n1 sample: "+str(k1_h)+" "+str(n1_h_amse[int(len(x1_h_arr)*k1_h)-1])+"\n")
f.write("Min for n2 sample: "+str(k2_h)+" "+str(n2_h_amse[int(len(x2_h_arr)*k2_h)-1])+"\n")
f.write("Minimization boundary for n1 sample: "+str(max_h_index1/float(len(x1_h_arr)))+"\n")
f.write("Minimization boundary for n2 sample: "+str(max_h_index2/float(len(x2_h_arr)))+"\n")
# filter out boundary values using theta parameters for moments
min_k1 = 2
max_k1 = len(x1_m_arr) - 1
min_k2 = 2
max_k2 = len(x2_m_arr) - 1
n1_m_amse[np.where((n1_m_amse == np.inf) |\
(n1_m_amse == -np.inf))] = np.nan
axes_d[1].set_yscale("log")
axes_d[1].set_ylim((0.1*np.nanmin(n1_m_amse[min_k1:max_k1]), 1.0))
axes_d[1].set_xlabel("Fraction of Bootstrap Order Statistics",
fontsize = 20)
axes_d[1].set_ylabel(r"$\langle AMSE \rangle$", fontsize = 20)
axes_d[1].set_title("Moments Estimator", fontsize = 20)
# plot AMSE and corresponding minimum
axes_d[1].plot(x1_m_arr[min_k1:max_k1], n1_m_amse[min_k1:max_k1],
alpha = 0.5, lw = 1.5,
color = "#d55e00", label = r"$n_1$ samples")
axes_d[1].scatter([k1_m], [n1_m_amse[int(len(x1_m_arr)*k1_m)-1]],
color = "#d55e00",
marker = 'o', edgecolor = "black", alpha = 0.5,
label = r"Min for $n_1$ sample")
axes_d[1].plot(x2_m_arr[min_k2:max_k2], n2_m_amse[min_k2:max_k2],
alpha = 0.5, lw = 1.5,
color = "#0072b2", label = r"$n_2$ samples")
axes_d[1].scatter([k2_m], [n2_m_amse[int(len(x2_m_arr)*k2_m)-1]],
color = "#0072b2",
marker = 'o', edgecolor = "black", alpha = 0.5,
label = r"Min for $n_2$ sample")
axes_d[1].axvline(max_m_index1/float(len(x1_m_arr)), color = "#d55e00",
ls = '--', alpha = 0.5,
label = r"Minimization boundary for $n_1$ sample")
axes_d[1].axvline(max_m_index2/float(len(x2_m_arr)), color = "#0072b2",
ls = '--', alpha = 0.5,
label = r"Minimization boundary for $n_2$ sample")
axes_d[1].legend(loc = "best")
if savedata == 1:
with open(os.path.join(output_dir+"/"+output_name+"_mom_diagn1.dat"), "w") as f:
for i in range(len(x1_m_arr[min_k1:max_k1])):
f.write(str(x1_m_arr[min_k1:max_k1][i]) + " " + str(n1_m_amse[min_k1:max_k1][i]) + "\n")
with open(os.path.join(output_dir+"/"+output_name+"_mom_diagn2.dat"), "w") as f:
for i in range(len(x2_m_arr[min_k2:max_k2])):
f.write(str(x2_m_arr[min_k2:max_k2][i]) + " " + str(n2_m_amse[min_k2:max_k2][i]) + "\n")
with open(os.path.join(output_dir+"/"+output_name+"_mom_diagn_points.dat"), "w") as f:
f.write("Min for n1 sample: "+str(k1_m)+" "+str(n1_m_amse[int(len(x1_m_arr)*k1_m)-1])+"\n")
f.write("Min for n2 sample: "+str(k2_m)+" "+str(n2_m_amse[int(len(x2_m_arr)*k2_m)-1])+"\n")
f.write("Minimization boundary for n1 sample: "+str(max_m_index1/float(len(x1_m_arr)))+"\n")
f.write("Minimization boundary for n2 sample: "+str(max_m_index2/float(len(x2_m_arr)))+"\n")
min_k1 = 2
max_k1 = len(x1_k_arr)
min_k2 = 2
max_k2 = len(x2_k_arr)
n1_k_amse[np.where((n1_k_amse == np.inf) |\
(n1_k_amse == -np.inf))] = np.nan
axes_d[2].set_yscale("log")
axes_d[2].set_ylim((0.1*np.nanmin(n1_k_amse[min_k1:max_k1]), 1.0))
axes_d[2].set_xlabel("Fraction of Bootstrap Order Statistics",
fontsize = 20)
axes_d[2].set_ylabel(r"$\langle AMSE \rangle$", fontsize = 20)
axes_d[2].set_title("Kernel-type Estimator", fontsize = 20)
# plot AMSE and corresponding minimum
axes_d[2].plot(x1_k_arr[min_k1:max_k1], n1_k_amse[min_k1:max_k1],
alpha = 0.5, lw = 1.5,
color = "#d55e00", label = r"$n_1$ samples")
axes_d[2].scatter([h1], [n1_k_amse[np.where(x1_k_arr == h1)]], color = "#d55e00",
marker = 'o', edgecolor = "black", alpha = 0.5,
label = r"Min for $n_1$ sample")
# plot boundary of minimization
axes_d[2].axvline(max_k_index1, color = "#d55e00",
ls = '--', alpha = 0.5,
label = r"Minimization boundary for $n_2$ sample")
axes_d[2].plot(x2_k_arr[min_k2:max_k2], n2_k_amse[min_k2:max_k2],
alpha = 0.5, lw = 1.5,
color = "#0072b2", label = r"$n_2$ samples")
axes_d[2].scatter([h2], [n2_k_amse[np.where(x2_k_arr == h2)]], color = "#0072b2",
marker = 'o', edgecolor = "black", alpha = 0.5,
label = r"Min for $n_2$ sample")
axes_d[2].axvline(max_k_index2, color = "#0072b2",
ls = '--', alpha = 0.5,
label = r"Minimization boundary for $n_2$ sample")
axes_d[2].legend(loc = "best")
if savedata == 1:
with open(os.path.join(output_dir+"/"+output_name+"_kern_diagn1.dat"), "w") as f:
for i in range(len(x1_k_arr[min_k1:max_k1])):
f.write(str(x1_k_arr[min_k1:max_k1][i]) + " " + str(n1_k_amse[min_k1:max_k1][i]) + "\n")
with open(os.path.join(output_dir+"/"+output_name+"_kern_diagn2.dat"), "w") as f:
for i in range(len(x2_k_arr[min_k2:max_k2])):
f.write(str(x2_k_arr[min_k2:max_k2][i]) + " " + str(n2_k_amse[min_k2:max_k2][i]) + "\n")
with open(os.path.join(output_dir+"/"+output_name+"_kern_diagn_points.dat"), "w") as f:
f.write("Min for n1 sample: "+str(h1)+" "+str(n1_k_amse[np.where(x1_k_arr == h1)][0])+"\n")
f.write("Min for n2 sample: "+str(h2)+" "+str(n2_k_amse[np.where(x2_k_arr == h2)][0])+"\n")
f.write("Minimization boundary for n1 sample: "+str(n1_k_amse[int(max_k_index1*hsteps)-1])+"\n")
f.write("Minimization boundary for n2 sample: "+str(n2_k_amse[int(max_k_index2*hsteps)-1])+"\n")
fig_d.tight_layout()
diag_plots_path = output_dir+"/"+output_name+"_diag.pdf"
fig_d.savefig(diag_plots_path)
fig.tight_layout(pad = 0.2)
fig.savefig(output_file_path)
# ==========================
# ========== Main ==========
# ==========================
def main():
#ignore warnings other than explicit ones
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description =
"Script to compute tail index estimates\
for a provided dataset.")
parser.add_argument("sequence_file_path",
help = "Path to a data sequence.", type = str)
parser.add_argument("output_file_path",
help = "Output path for plots. Use either PDF or\
PNG format.", type = str)
parser.add_argument("--nbins",
help = "Number of bins for degree\
distribution (default = 30)", type = int,
default = 30)
parser.add_argument("--rsmooth",
help = "Smoothing parameter for smooth Hill estimator\
(default = 2)",
type = int, default = 2)
parser.add_argument("--alphakernel",
help = "Alpha parameter used for kernel-type estimator.\
Should be greater than 0.5 (default = 0.6).",
type = float, default = 0.6)
parser.add_argument("--hsteps",
help = "Parameter to select number of bandwidth\
steps for kernel-type estimator, (default = 200).",
type = int, default = 200)
parser.add_argument("--noise",
help = "Switch on/off uniform noise in range\
[-5*10^(-p), 5*10^(-p)] that is added to each\
data point. Used for integer-valued sequences\
with p = 1 (default = 1).",
type = int, default = 1)
parser.add_argument("--pnoise",
help = "Uniform noise parameter corresponding to\
the rounding error of the data sequence. For integer\
values it equals 1 (default = 1).",
type = int, default = 1)
parser.add_argument("--bootstrap",
help = "Flag to switch on/off double-bootstrap\
algorithm for defining optimal order statistic\
of Hill, moments and kernel-type estimators.\
(default = 1)",
type = int, default = 1)
parser.add_argument("--tbootstrap",
help = "Fraction of bootstrap samples in the 2nd\
bootstrap defined as n*tbootstrap, i.e., for\
tbootstrap = 0.5 each bootstrap sample has\
size n/2 (default = 0.5).",
type = float, default = 0.5)
parser.add_argument("--rbootstrap",
help = "Number of bootstrap resamplings used in\
double-bootstrap. Note that each sample results\
are stored in an array, so be careful about the\
memory (default = 500).",
type = int, default = 500)
parser.add_argument("--amseborder",
help = "Upper bound for order statistic to consider\
for double-bootstrap AMSE minimizer.\
Entries that are smaller or equal to the border value\
are ignored during AMSE minimization (default = 1).",
type = float, default = 1.0)
parser.add_argument("--theta1",
help = "Lower bound of plotting range, defined as\
k_min = ceil(n^theta1), (default = 0.01).\
Overwritten if plots behave badly within the range.",
type = float, default = 0.01)
parser.add_argument("--theta2",
help = "Upper bound of plotting range, defined as\
k_max = floor(n^theta2), (default = 0.99).\
Overwritten if plots behave badly within the range.",
type = float, default = 0.99)
parser.add_argument("--diagplots",
help = "Flag to switch on/off plotting AMSE statistics\
for Hill/moments/kernel-type double-bootstrap algorithm.\
Used for diagnostics when double-bootstrap provides unstable\
results. Can be used to find proper amseborder parameter.\
(default = 0).",
type = int, default = 0)
parser.add_argument("--verbose",
help = "Verbosity of bootstrap procedure.\
(default = 0).",
type = int, default = 0)
parser.add_argument("--savedata",
help = "Flag to save data files in the directory\
with plots.\
(default = 0)",
type = int, default = 0)
parser.add_argument("--delimiter",
help = "Delimiter used in the input file.\
Options are: whitespace, tab, comma, semicolon.",
type = str, default = "whitespace")
args = parser.parse_args()
# check arguments for consistency
if args.nbins <= 0:
parser.error("Number of bins should be greater than 0.")
if args.rsmooth < 2:
parser.error("r_smooth should be greater than 1.")
if args.alphakernel <= 0.5:
parser.error("alpha of kernel estimator should be grater than 0.5.")
if args.hsteps <= 0:
parser.error("hsteps should be greater than 0.")
if args.noise != 0 and args.noise != 1:
parser.error("noise flag should be 0 or 1.")
if args.pnoise < 0:
parser.error("pnoise parameter should be greater or equal to 0.")
if args.bootstrap != 0 and args.bootstrap != 1:
parser.error("bootstrap flag should be 0 or 1.")
if args.tbootstrap <= 0.0 or args.tbootstrap >= 1.0:
parser.error("tbootstrap should be in range (0, 1).")
if args.rbootstrap <= 0:
parser.error("Number of bootstrap resamples should be greater than 0.")
if args.amseborder <= 0.0:
parser.error("amseborder should be greater than 0.")
if args.diagplots != 0 and args.diagplots != 1:
parser.error("diagplots flag should be 0 or 1.")
if args.verbose != 0 and args.verbose != 1:
parser.error("verbose flag should be 0 or 1.")
if args.savedata != 0 and args.savedata != 1:
parser.error("savedata flag should be 0 or 1.")
if args.theta1 < 0.0 or args.theta1 > 1.0:
parser.error("Theta parameters should be in [0,1] range, where theta1 < theta2.")
if args.theta2 < 0.0 or args.theta2 > 1.0:
parser.error("Theta parameters should be in [0,1] range, where theta1 < theta2.")
if args.theta2 <= args.theta1:
parser.error("Theta parameters should be in [0,1] range, where theta1 < theta2.")
if args.delimiter not in set(['whitespace', 'tab', 'comma', 'semicolon']):
parser.error("Delimiter provided is not supported.")
number_of_bins = args.nbins
r_smooth = args.rsmooth
alpha = args.alphakernel
hsteps = args.hsteps
if args.noise == 1:
noise_flag = True
else:
noise_flag = False
p_noise = args.pnoise
if args.bootstrap == 1:
bootstrap_flag = True
else:
bootstrap_flag = False
t_bootstrap = args.tbootstrap
r_bootstrap = args.rbootstrap
amse_border = args.amseborder
if args.diagplots == 1:
diagnostic_plots_flag = True
else:
diagnostic_plots_flag = False
if args.verbose == 1:
verbose = True
else:
verbose = False
if args.delimiter == "whitespace":
delimiter = " "
elif args.delimiter == "tab":
delimiter = "\t"
elif args.delimiter == "comma":
delimiter = ","
elif args.delimiter == "semicolon":
delimiter = ";"
# check for number of entries
N = 0
with open(args.sequence_file_path, "r") as f:
for line in f:
degree, count = line.strip().split(delimiter)
N += int(count)
print("========== Tail Index Estimation ==========")
print("Number of data entries: %i" % N)
ordered_data = np.zeros(N)
current_index = 0
with open(args.sequence_file_path, "r") as f:
for line in f:
degree, count = line.strip().split(delimiter)
ordered_data[current_index:current_index + int(count)] = float(degree)
current_index += int(count)
#enforce minimization boundary to the order statistics larger than border value
eps_stop = 1 - float(len(ordered_data[np.where(ordered_data <= amse_border)]))\
/len(ordered_data)
print("========================")
print("Selected AMSE border value: %0.4f"%amse_border)
print("Selected fraction of order statistics boundary for AMSE minimization: %0.4f"%eps_stop)
print("========================")
make_plots(ordered_data, args.output_file_path, number_of_bins,
r_smooth, alpha, hsteps, bootstrap_flag, t_bootstrap,
r_bootstrap, diagnostic_plots_flag, eps_stop,
args.theta1, args.theta2, verbose, noise_flag,
p_noise, args.savedata)
if __name__ == '__main__':
t1 = time.time()
main()
t2 = time.time()
print("Elapsed time (total):", t2-t1)
``` |
{
"source": "jkbren/rank-turbulence-divergence",
"score": 3
} |
#### File: rank-turbulence-divergence/code/rtd.py
```python
import numpy as np
from collections import Counter
import itertools as it
from scipy.stats import rankdata
def get_combined_domain(X1, X2):
"""
Returns a list of the unique elements in two list-like objects. Note that
there's a lot of ways to make this function, but given how the rest of the
rank-turbulence divergence function is structured, it's nice to have this
self-contained version.
Parameters
----------
X1, X2 (list or np.ndarray or dict):
Two list-like objects with domains that need to be joined.
Returns
-------
combined_domain (list):
List of unique elements in the two inputs.
"""
combined_domain = list(set(X1) | set(X2))
return combined_domain
def get_rank_dictionary(X, C):
"""
Returns a dictionary where the keys are the items being ranked and the
values are their corresponding ranks, using fractional rankings.
Parameters
----------
X (list or np.ndarray or dict):
Either a list of raw data (which will need to be counted and reshaped)
or a dictionary of {element:counts} or a rank-ordered list of elements.
See the documentation for rank_turbulence_divergence for more details
about what types of inputs should be provided.
C (dict):
Empty dictionary to be populated by counts, then ranked.
Returns
-------
R (dict):
dict where the keys are the ranked elements and the values are their
fractional ranking.
N (int):
Number of unique elements in X.
"""
if type(X) == dict:
dtype_dict = True
N = len(X)
c = X.copy()
else:
dtype_dict = False
N = len(set(list(X)))
if not dtype_dict:
if len(np.unique(X)) == len(X):
m = list(range(len(X)))
aug = [[v] * (m[len(m) - i - 1] + 1) for i, v in enumerate(X)]
x = list(it.chain.from_iterable(aug))
c = dict(Counter(x))
else:
c = dict(Counter(X))
for k, v in c.items():
C[k] += v
d = list(C.keys())
counts = list(C.values())
# strange step, but scipy's ranking function is reversed
ranking = len(counts) - rankdata(counts) + 1
R = dict(zip(d, ranking))
return R, N
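# Example of the fractional ranking produced above (illustrative): counts
# {'a': 5, 'b': 3, 'c': 3, 'd': 1} yield ranks {'a': 1.0, 'b': 2.5, 'c': 2.5, 'd': 4.0},
# i.e. tied counts share the average of the rank positions they occupy.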
def rank_turbulence_divergence(X1, X2, alpha=1.0):
r"""
Calculates the rank turbulence divergence between two ordered rankings,
$R_1$ and $R_2$. This is done via the following equation, with a tunable
``inverse temperature'' parameter, alpha.
$ D_{\alpha}^{R}(R_1||R_2) =
\dfrac{1}{\mathcal{N}_{1,2;\alpha}}
\dfrac{\alpha+1}{\alpha}
\sum_{\tau \in R_{1,2;\alpha}}
\Big\vert \dfrac{1}{\big[r_{\tau,1}\big]^\alpha} -
\dfrac{1}{\big[r_{\tau,2}\big]^\alpha} \Big\vert^{1/(\alpha+1)} $
where the $\mathcal{N}_{1,2;\alpha}$ term refers to a normalization factor
that forces the rank-turbulence divergence between 0 and 1, as follows:
$ \mathcal{N}_{1,2;\alpha} =
\dfrac{\alpha+1}{\alpha}
\sum_{\tau \in R_1}
\Big\vert \dfrac{1}{\big[r_{\tau,1}\big]^\alpha} -
\dfrac{1}{\big[N_1+\frac{1}{2}N_2\big]^\alpha} \Big\vert^{1/(\alpha+1)}
+ \dfrac{\alpha+1}{\alpha} \sum_{\tau \in R_2} \Big\vert
\dfrac{1}{\big[N_2 + \frac{1}{2}N_1\big]^\alpha} -
\dfrac{1}{\big[r_{\tau,2}\big]^\alpha} \Big\vert^{1/(\alpha+1)} $
where $N_1$ and $N_2$ are the sizes of $R_1$ and $R_2$ (i.e., the number
of things being ranked).
Parameters
----------
X1, X2 (list or np.ndarray, or dict):
Two rank-ordered vectors, that do not need to be of the same domain. It
admits the following datatypes:
1) X1 = ['mary','jane','chelsea','ann'],
X2 = ['ann','jane','barb','crystal']
...as two already-ranked lists of $\tau$s. In X1, then, 'mary'
would be in rank position 1.0, 'jane' in 2.0, etc.
2) X1 = ['mary','mary','mary','mary','mary','mary','jane','jane',
'jane','chelsea','chelsea','barb']
X2 = ['ann','ann','ann','ann','ann','jane','jane','jane',
'jane','barb','barb','crystal']
...as two "raw" datasets, without pre-counting the number of
elements in each list. Ultimately, in X1, 'mary' shows up 6
times, 'jane' shows up 3 times, 'chelsea' shows up 2 times,
and 'ann' shows up once. This function transforms this input
data into a dictionary of counts, then ultimately a dictionary
of ranks, such that $R_1$ and $R_2$ vectors for this example
are the same as in the first example.
3) X1 = {'mary':6, 'jane':3, 'chelsea':2, 'ann':1}
X2 = {'ann':5, 'jane':4, 'barb':2, 'crystal':1}
...as two dictionaries of {tau:count}. This might be useful in
a setting where you're given, for example, vote counts (i.e.,
{'<NAME>':4000, '<NAME>':2000, ... etc}).
alpha (float):
Tuning parameter, acts like an inverse temperature, such that a higher
value will ``zoom in'' on the data, making small deviations appear very
important to the final ranking. alpha ranges from 0 to infinity.
Returns
-------
Q (float):
The rank turbulence divergence between $R_1$ and $R_2$, a scalar
value between 0 and 1.
"""
combined_domain = get_combined_domain(X1, X2)
C1 = {i: 0 for i in combined_domain}
C2 = {i: 0 for i in combined_domain}
# Turn both vectors into dictionaries where the key is $\tau$, the property
# that's being ranked (popular baby names, sports teams, etc.), and the
# values are their (fractional) rank. This is gonna be useful when we loop
# through all $\tau$s in order to calculate the rank turbulence divergence.
R1, N1 = get_rank_dictionary(X1, C1)
R2, N2 = get_rank_dictionary(X2, C2)
# Then we're gonna be using certain terms frequently, so might as well
# turn those values into their own variables and give them useless names.
alph_exp = 1 / (alpha+1)
alph_mul = (alpha+1) / alpha
normN1 = (N1 + 0.5 * N2)**(-alpha)
normN2 = (N2 + 0.5 * N1)**(-alpha)
# as we loop through the elements in combined_domain, we'll be gradually
# adding to these numbers.
norm_1 = 0
norm_2 = 0
Q = 0
for tau in combined_domain:
r1tau_exp_negalpha = R1[tau]**(-alpha)
r2tau_exp_negalpha = R2[tau]**(-alpha)
dQ = np.abs(r1tau_exp_negalpha - r2tau_exp_negalpha)
norm_1 += np.abs(r1tau_exp_negalpha - normN1)**alph_exp
norm_2 += np.abs(normN2 - r2tau_exp_negalpha)**alph_exp
Q += dQ**alph_exp
Cr = alph_mul * norm_1 + alph_mul * norm_2
Q = 1/Cr * alph_mul * Q
return Q
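# Minimal usage sketch, reusing the dictionary example from the docstring above
# (values are illustrative):
#   X1 = {'mary': 6, 'jane': 3, 'chelsea': 2, 'ann': 1}
#   X2 = {'ann': 5, 'jane': 4, 'barb': 2, 'crystal': 1}
#   D = rank_turbulence_divergence(X1, X2, alpha=1.0)  # scalar in [0, 1]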
def main():
"""Empty main function."""
return
if __name__ == '__main__':
main()
``` |
{
"source": "jkcaldwe/Robinhood",
"score": 3
} |
#### File: Robinhood/docs/qtapp.py
```python
import sys
from PyQt5.QtWidgets import QMainWindow, QPushButton, QApplication
from PyQt5 import QtCore
import logging
import auto_trader
#Support default dict list
from collections import defaultdict
#Date
import datetime
#configure logging to print debug messages on the screen
logging.basicConfig(level=logging.DEBUG)
class TraderGui(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
# Define global variables for the class instance
self.quoteTimer = QtCore.QTimer();
self.positionTimer = QtCore.QTimer();
#Support for control symbols which influence the market
self.control_symbols = ["SPY"];
#Symbols of the positions I'm currently in with quantities
self.position_symbols = defaultdict(list);
#Symbols for positions which the app has sold with quantities sold
self.sold_symbols = defaultdict(list);
#compiled dict with all above symbols. Stores previous close, open and stock prices every x minutes to be used by the analysis and weighted average
self.position_quotes = defaultdict(list);
#takes data from position quotes to store weighted linear averages every x minutes as defined by timers
self.weighted_averages = defaultdict(list);
# Call main trader logic
self.main();
def initUI(self):
# Define UI elements
btn1 = QPushButton("Button 1", self)
btn1.move(30, 50)
btn2 = QPushButton("Button 2", self)
btn2.move(150, 50)
#Connect UI elements to slots
btn1.clicked.connect(self.buttonClicked)
btn2.clicked.connect(self.buttonClicked)
self.statusBar()
#Define main window
self.setGeometry(300, 300, 290, 150)
self.setWindowTitle('Event sender')
self.show()
def main(self):
#log in and return trade handle. 'self' defines it as a global across the class
self.my_trader = auto_trader.login();
auto_trader.buyPosition(self.my_trader, "UUUU", 2);
# # Get current open positions and quantity of the position
# self.position_symbols = auto_trader.getCurrentPositions(self.my_trader);
# logging.info(self.position_symbols);
# #Create data array for each symbol in my positions and initialize with opening and current quote
# position_quotes_init = defaultdict(list);
# self.position_quotes = auto_trader.populateQuoteData(self.position_symbols, self.sold_symbols, self.control_symbols, position_quotes_init);
# logging.info(self.position_quotes);
# #Create a timer and append current quote data to the position quote list
# self.quoteTimer.timeout.connect(self.func_quoteTimer);
# #Timer will repeat every 2 minutes
# self.quoteTimer.start(30000);
# self.positionTimer.timeout.connect(self.func_posSymbolsTimer);
# #Trigger every 15 minutes to update current positions
# self.positionTimer.start(900000);
def buttonClicked(self):
sender = self.sender()
self.statusBar().showMessage(sender.text() + ' was pressed')
logging.info(sender.text() + ' was pressed');
def func_quoteTimer(self):
#Update postion quotes with new data every time timer expires
self.position_quotes = auto_trader.populateQuoteData(self.position_symbols, self.sold_symbols, self.control_symbols, self.position_quotes);
#Define the window size for the moving average
window_size = 3
#Find data for each position and if there is enough to generate a running weighted average, do it and put in weighted average dict
for symbol in self.position_quotes:
tempQuoteList = self.position_quotes[symbol];
logging.info(tempQuoteList);
if (len(tempQuoteList) > (window_size + 1)):
#Append returned weighted average for the window size to the weighted averages list
self.weighted_averages[symbol].append(auto_trader.calcWeightedAverage(tempQuoteList, window_size));
logging.info (self.weighted_averages);
#send weighted averages list for buy/sell analysis
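# Illustrative sketch only: a linearly weighted average over the last
# `window_size` quotes could be computed as below; the actual logic lives in
# auto_trader.calcWeightedAverage, which is defined elsewhere.
#   weights = range(1, window_size + 1)  # newest quote gets the largest weight
#   weighted_avg = sum(w * q for w, q in zip(weights, quotes[-window_size:])) / sum(weights)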
def func_posSymbolsTimer(self):
self.position_symbols = auto_trader.getCurrentPositions(self.my_trader);
logging.info(self.position_symbols);
#Define timer as global so it will keep running while app is alive
# timer = QtCore.QTimer();
# Global to get position quotes
# position_quotes = [];
#Start application
app = QApplication(sys.argv)
autoTrader = TraderGui()
sys.exit(app.exec_())
``` |
{
"source": "jkcg-learning/MPoseDataset_2021",
"score": 2
} |
#### File: MPoseDataset_2021/scripts/create_rgb_pose.py
```python
from scripts.init_vars import *
import os
import glob
import scripts.lib.lib_seq as ls
import numpy as np
# remove empty frames at the beginning and at the end
def trim_seq(s, d, f):
if d[0] == 0:
start = next(i for i, obj in enumerate(d) if obj == 1)
else:
start = 0
if d[-1] == 0:
end = -next(i for i, obj in enumerate(d[::-1]) if obj == 1)  # keep the last detected frame
return s[:, :, start:end], d[start:end], f[start:end]
else:
return s[:, :, start:], d[start:], f[start:]
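# e.g. with d = [0, 0, 1, 1, 1, 0], trim_seq drops the two leading and the one
# trailing frame where no person was detected and keeps only the detected span.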
# split sequence into parts with min_frame_length <= frames <= max_frame_length
def split_seq(s, d, f, meta, video, trim=True):
if trim:
s, d, f = trim_seq(s, d, f)
s = s[:, :, d == 1]
frames = f[d == 1]
for count, start in enumerate(range(0, s.shape[2], max_frame_length)):
sub_s = s[:, :, start:start+max_frame_length]
fra = frames[start:start + max_frame_length]
if sub_s.shape[2] >= min_frame_length:
name = '{}-{}-{}'.format(meta['sample'].replace('.avi', ''),
int(fra[0]),
int(fra[-1]))
ls.save_sequence(seq=sub_s,
det=np.ones((sub_s.shape[2])),
fra=fra,
name=name,
meta=meta,
video=video)
def read_video(path):
vidcap = cv2.VideoCapture(path)
success, image = vidcap.read()
frames = []
while success:
frames.append(image)
success, image = vidcap.read()
vidcap.release()
cv2.destroyAllWindows()
return frames
def create_rgb_pose(force=False, verbose=True):
jsons = os.listdir(paths['json'])
print(paths['json'])
print(paths['rgb'])
if any(os.scandir(paths['rgb'])) and not force:
if verbose:
print('RGB and Poses already processed, skipping...')
return
for i in jsons:
print(i)
meta = ls.get_meta(i)
print(meta)
if glob.glob(os.path.join(paths['rgb'], i.split('.')[-2]+'-*')) and not force:
if verbose:
print(f'{i} already processed, skipping...')
continue
elif verbose:
print('Processing {}...'.format(i))
video = read_video(os.path.join(paths['video'], i))
seq, det, fra = ls.read_sequence(paths['json'] + i)
split_seq(s=seq, d=det, f=fra, meta=meta, video=video)
```
#### File: MPoseDataset_2021/scripts/create_splits.py
```python
from scripts.init_vars import *
import pandas as pd
from scripts.lib.lib_common import read_poses
testing_actors = {1: ['person12',
'person23',
'person08',
'person02',
'person24',
'anna',
'jiawei',
'andreas3',
'julien3',
'daniel3',
'alba2',
'clare3',
'andreas2',
'jon',
'joe',
'lyova',
'shahar',
's02',
's07',
'ss1',
'ss4'],
2: ['person01',
'person07',
'person16',
'person20',
'person25',
'karam',
'zeyu',
'florian1',
'daniel2',
'andreas1',
'nicolas3',
'amel1',
'chiara3',
'jean',
'haidi',
'daria',
'eli',
's08',
's05',
'ss7',
'ss8'],
3: ['person03',
'person11',
'person17',
'person21',
'person14',
'scott',
'zaid',
'hedlena3',
'julien1',
'nicolas2',
'amel3',
'daniel1',
'chiara1',
'hansung',
'nikos',
'moshe',
'ira',
's04',
's10',
'ss2',
'ss5']}
def create_splits(force=False, verbose=True):
if os.path.isfile(logs_path + 'train_test_split1.txt') and not force:
if verbose:
print(f'Splits already generated, skipping...')
return
report = read_poses()
for i in range(1, 4):
split = pd.DataFrame({'sample': report.loc[report.actor.isin(testing_actors[i]), 'sample'],
'set': 'test'})
split = pd.concat([split,
pd.DataFrame({'sample': report.loc[~report.actor.isin(testing_actors[i]), 'sample'],
'set': 'train'})])
split.to_csv(logs_path + 'train_test_split{}.txt'.format(i), sep='\t', index=None, header=None)
if verbose:
print('Splits generated!')
```
#### File: MPoseDataset_2021/scripts/extract_posenet_pose.py
```python
import shutil
from zipfile import ZipFile
from scripts.init_vars import archives_paths, paths
import os
import tarfile
def unzip(filename, save_to):
with ZipFile(filename, 'r') as zipObj:
for member in zipObj.namelist():
file = os.path.basename(member)
# skip directories
if not file:
continue
source = zipObj.open(member)
target = open(os.path.join(save_to, file), "wb")
with source, target:
shutil.copyfileobj(source, target)
def extract_posenet_pose(force=False, verbose=True):
if verbose:
print('Extracting PoseNet poses...')
if any(os.scandir(paths['posenet'])) and not force:
if verbose:
print('\t{} already extracted, skipping...'.format(os.listdir(archives_paths['posenet'])[0]))
else:
if verbose:
print('\tExtracting: {}'.format(os.listdir(archives_paths['posenet'])[0]))
unzip(filename=os.path.join(archives_paths['posenet'],
os.listdir(archives_paths['posenet'])[0]),
save_to=paths['posenet'])
```
#### File: scripts/lib/lib_common.py
```python
from scripts.init_vars import paths
import os
import pickle
import pandas as pd
import numpy as np
import warnings
def read_poses(path=paths['pose']):
report = pd.DataFrame(columns=['sample', 'dataset', 'actor', 'action', 'length', 'aver_conf', 'fn%'])
for i in sorted(os.listdir(path)):
with open(os.path.join(path, i), 'rb') as f:
d = pickle.load(f)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
report = report.append({'sample': d['name'],
'dataset': d['dataset'],
'actor': d['actor'],
'action': d['action'],
'length': d['length'],
'aver_conf': np.nanmean(d['seq'][:, 2, :]),
'fn%': np.isnan(d['seq'][:, [0, 1], :]).sum()/d['seq'][:, [0, 1], :].size}, ignore_index=True)
return report
```
#### File: MPoseDataset_2021/scripts/refine_dataset.py
```python
import os
from scripts.init_vars import *
import pandas as pd
from scripts.lib.lib_seq import get_meta
import pickle as pkl
def change_meta(file, new_action):
dict = pkl.load(open(file, 'rb'))
dict['name'] = os.path.basename(file)[:-2].replace(dict['action'], new_action)
dict['action'] = new_action
pkl.dump(dict, open(file, 'wb'))
def refine_data(verbose=False):
# redefine outliers (reassign/remove)
for i in os.listdir(misc_paths['outliers']):
outliers = pd.read_csv(os.path.join(misc_paths['outliers'], i), delimiter='\t', header=None)
for k, row in outliers.iterrows():
sample = row[0]
new_action = row[1]
meta = get_meta(sample, is_video=False)
if meta['action'] == new_action:
continue
else:
if new_action in actions.keys():
new_sample = sample.replace(meta['action'], new_action)
old = os.path.join(paths['rgb'], sample+'.avi')
new = os.path.join(paths['rgb'], new_sample+'.avi')
if (not os.path.exists(old)) and os.path.exists(new):
if verbose:
print('\t (done previously) renamed: {} into {}'.format(sample, new_sample))
elif os.path.exists(old) and (not os.path.exists(new)):
os.rename(old, new)
old = os.path.join(paths['pose'], sample+'.p')
new = os.path.join(paths['pose'], new_sample+'.p')
change_meta(file=old, new_action=new_action)
os.rename(old, new)
print('Renamed: {} into {}'.format(sample, new_sample))
elif new_action == 'remove':
old = os.path.join(paths['rgb'], sample+'.avi')
if not os.path.exists(old):
if verbose:
print('\t (done previously) TRASHED: {}'.format(sample))
else:
os.remove(os.path.join(paths['rgb'], sample + '.avi'))
os.remove(os.path.join(paths['pose'], sample + '.p'))
print('TRASHED: {}'.format(sample))
def refine_dataset(verbose=False):
print('Refining Data...')
refine_data(verbose=verbose)
print('Checking Everything...')
refine_data()
``` |
{
"source": "jkchandalia/toxic-comment-classifier",
"score": 3
} |
#### File: toxic-comment-classifier/flask_model/app.py
```python
import pickle
import numpy as np
import pandas as pd
from flask import Flask, request
from joblib import dump, load
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
import json
model = None
preprocessor = None
app = Flask(__name__)
def load_model():
global model
global preprocessor
# model variable refers to the global variable
model = load('models/model.joblib')
preprocessor = load('models/preprocessing.joblib')
@app.route('/')
def home_endpoint():
return 'Hello World!'
@app.route('/predict', methods=['POST'])
def get_prediction():
if request.method == 'POST':
data = request.get_json() # Get data posted as a json
comment_vectorized = preprocessor.transform(data)
out = list(model.predict_proba(comment_vectorized)[:,1])
return json.dumps(out)
if __name__ == '__main__':
load_model() # load model at the beginning once only
app.run(host='0.0.0.0', port=80, ssl_context='adhoc')
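# Example request (illustrative; assumes the server above is running locally with
# its self-signed certificate, hence curl's -k flag):
#   curl -k -X POST https://localhost:80/predict \
#        -H "Content-Type: application/json" \
#        -d '["what a great read", "you are an idiot"]'
# The response is a JSON list with one toxicity probability per comment.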
```
#### File: toxic-comment-classifier/gcp/main.py
```python
import json
import numpy as np
import pandas as pd
from joblib import load
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
model = load('models/model.joblib')
preprocessor = load('models/preprocessing.joblib')
def get_toxicity_prediction(request):
# For more information about CORS and CORS preflight requests, see
# https://developer.mozilla.org/en-US/docs/Glossary/Preflight_request
# for more information.
# Set CORS headers for the preflight request
if request.method == 'OPTIONS':
# Allows POST requests from any origin with the Content-Type
# header and caches preflight response for 3600 seconds
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'POST',
'Access-Control-Allow-Headers': 'Content-Type',
'Access-Control-Max-Age': '3600'
}
return ('', 204, headers)
# Set CORS headers for the main request
headers = {
'Access-Control-Allow-Origin': '*'
}
data = request.get_json(silent=True) # Get data posted as a json
comment_vectorized = preprocessor.transform(data)
pred_proba = model.predict_proba(comment_vectorized)[:,1]
out = list(zip(data,pred_proba))
#prediction = model.predict_proba(data_vectorized)[:,1] # runs globally loaded model on the data
return (json.dumps(out), 200, headers)
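# Example payload (illustrative): the request body is a JSON list of comment
# strings, e.g. ["what a great read", "you are an idiot"], and the response is
# a JSON list of [comment, toxicity_probability] pairs.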
```
#### File: toxic-comment-classifier/tests/test_model.py
```python
from toxicity.model import apply_count_vectorizer, run_naive_bayes_classifier
import numpy as np
import pandas as pd
import unittest
from joblib import dump, load
import os
from sklearn.naive_bayes import MultinomialNB
class TestApplyCountVectorizer(unittest.TestCase):
def setUp(self):
data_file_path = "dummy_data/xdummy.txt"
with open(data_file_path) as f:
self.xdummy = f.readlines()
def test_count_vectorizer(self):
count_train, count_valid = apply_count_vectorizer(self.xdummy, self.xdummy)
xmatrix = [[1, 1, 1, 0],
[1, 0, 0, 1],
[1, 0, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 1, 0]]
self.assertEqual(count_train.shape, (7,4))
self.assertTrue(np.all(count_train.todense()==count_valid.todense()))
self.assertTrue(np.all(xmatrix==count_train.todense()))
def test_output_path(self):
output_path = "dummy_data"
count_train, count_valid = apply_count_vectorizer(
self.xdummy,
self.xdummy,
output_path=output_path)
self.assertTrue(os.path.exists(output_path + "/count_vectorizer.joblib"))
os.remove(output_path + "/count_vectorizer.joblib")
class TestRunNaiveBayes(unittest.TestCase):
def setUp(self):
self.count_train = [[1, 1, 1, 0],
[1, 0, 0, 1],
[1, 0, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 1, 0]]
self.ytrain = [1, 1, 1, 0, 0, 0, 1]
def test_instance(self):
self.assertIsInstance(run_naive_bayes_classifier(self.count_train, self.ytrain),
MultinomialNB)
def test_output_path(self):
output_path = "dummy_data"
nb_classifier = run_naive_bayes_classifier(
self.count_train,
self.ytrain,
output_path=output_path)
self.assertTrue(os.path.exists(output_path + "/nb_classifier.joblib"))
os.remove(output_path + "/nb_classifier.joblib")
```
#### File: toxic-comment-classifier/toxicity/analysis.py
```python
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
# NOTE: `fast_tokenizer` is assumed to be defined elsewhere (e.g. a pre-loaded
# HuggingFace `tokenizers` word-piece tokenizer); it is not created in this file.
def explore_comment_length(x, y):
texts=x.astype(str)
tokenizer=fast_tokenizer
chunk_size=256
all_ids = []
for i in tqdm(range(0, len(texts), chunk_size)):
text_chunk = texts[i:i+chunk_size].tolist()
encs = tokenizer.encode_batch(text_chunk)
all_ids.extend([enc.ids for enc in encs])
lens = []
for j in range(len(all_ids)):
lens.append(len(all_ids[j]))
plt.hist(lens, 50)
plt.yscale('log')
long_index = (np.array(lens)>500)
long_index = (np.array(lens)>500)
print('Number of comments: ' + str(len(long_index)))
print('Number of toxic comments: ' + str(sum(y)))
print('Number of comments longer than 500 tokens: ' + str(sum(long_index)))
print('Number of toxic comments longer than 500 tokens: ' + str(sum(y[long_index])))
encoded = tokenizer.encode_batch(['man walks down the street happily don''t you think @fire aslkfd291o'])
print(encoded[0].ids)
for id_item in encoded[0].ids:
print(tokenizer.id_to_token(id_item))
#Testcase
fast_tokenizer.token_to_id('[UNK]')
print(fast_tokenizer.token_to_id('Man'))
print(fast_tokenizer.token_to_id('man'))
fast_tokenizer.id_to_token(28995)
```
#### File: toxic-comment-classifier/toxicity/model_embeddings.py
```python
import os
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from tensorflow.keras.layers import Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from keras.models import Sequential
from keras.layers.recurrent import LSTM, GRU,SimpleRNN
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.embeddings import Embedding
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from sklearn import preprocessing, decomposition, model_selection, metrics, pipeline
from keras.layers import GlobalMaxPooling1D, Conv1D, MaxPooling1D, Flatten, Bidirectional, SpatialDropout1D
from keras.preprocessing import sequence, text
from keras.callbacks import EarlyStopping, History, ModelCheckpoint, TensorBoard, History
from tensorflow.keras.metrics import Accuracy, AUC
def create_embedding_index(glove_embedding_path):
embeddings_index = {}
f = open(glove_embedding_path + 'glove840b300dtxt/glove.840B.300d.txt','r',encoding='utf-8')
for line in tqdm(f):
values = line.split(' ')
word = values[0]
coefs = np.asarray([float(val) for val in values[1:]])
embeddings_index[word] = coefs
f.close()
return embeddings_index
def create_embedding_matrix(word_index, embeddings_index, output_path=None):
# create an embedding matrix for the words we have in the dataset
embedding_matrix = np.zeros((len(word_index) + 1, 300))
for word, i in tqdm(word_index.items()):
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
if output_path:
np.save(output_path, embedding_matrix)
return embedding_matrix
def load_embeddings(input_path):
embedding_matrix = np.load(input_path)
return embedding_matrix
def build_model(word_index, embedding_matrix, max_len, transformer_trainable=False):
"""
Function for building and compiling the model
"""
# A simple LSTM with glove embeddings and one dense layer
model = Sequential()
model.add(Embedding(len(word_index) + 1,
300,
weights=[embedding_matrix],
input_length=max_len,
trainable=transformer_trainable))
model.add(LSTM(100, activation="tanh",
recurrent_activation="sigmoid", dropout=0.2, recurrent_dropout=0.1))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',metrics=['accuracy', AUC(curve='PR')])
return model
def tokenize(xtrain, xvalid, max_len):
# using keras tokenizer here
token = text.Tokenizer(num_words=None)
token.fit_on_texts(list(xtrain) + list(xvalid))
xtrain_seq = token.texts_to_sequences(xtrain)
xvalid_seq = token.texts_to_sequences(xvalid)
#zero pad the sequences
xtrain_pad = sequence.pad_sequences(xtrain_seq, maxlen=max_len)
xvalid_pad = sequence.pad_sequences(xvalid_seq, maxlen=max_len)
word_index = token.word_index
return xtrain_pad, xvalid_pad, word_index
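# Usage sketch (illustrative; xtrain/xvalid are lists of comment strings,
# ytrain/yvalid are binary toxicity labels, and max_len = 200 is an assumed choice):
#   xtrain_pad, xvalid_pad, word_index = tokenize(xtrain, xvalid, max_len=200)
#   embeddings_index = create_embedding_index('/path/to/glove/')
#   embedding_matrix = create_embedding_matrix(word_index, embeddings_index)
#   model = build_model(word_index, embedding_matrix, max_len=200)
#   model.fit(xtrain_pad, ytrain, validation_data=(xvalid_pad, yvalid), epochs=2)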
``` |
{
"source": "jkchandra/CNNFashionImageClassifer",
"score": 2
} |
#### File: jkchandra/CNNFashionImageClassifer/CNN_VCG19_TransferLearning.py
```python
import numpy as np
import cv2
import matplotlib.pyplot as plt
#%matplotlib inline
#from __future__ import print_function
import keras
from keras.models import Sequential, Model
from keras import optimizers
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, ZeroPadding2D, Convolution2D
from keras.utils import to_categorical
from skimage import io, transform
import glob
import os
import tensorflow as tf
import time
from PIL import Image
import re
import csv
from keras.preprocessing.image import ImageDataGenerator
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [ atoi(c) for c in re.split('(\d+)', text) ]
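# e.g. sorted(['img10.jpg', 'img2.jpg'], key=natural_keys) returns
# ['img2.jpg', 'img10.jpg'], i.e. frames are ordered numerically rather than
# lexicographically.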
def importTrain2():
# trainset path
path = 'INSERT PATH'
# resize all pictures to 299 x 299, subject to adjustment
w = 299
h = 299
cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
imgs = []
labels = []
paths = []
count = 0
for idx, folder in enumerate(cate):
for im in glob.glob(folder + '/*.jpg'):
paths.append(im)
paths.sort(key=natural_keys)
for im in paths:
img = cv2.imread(im)
if (img is None):
print('The image:%s cannot be identified.' % im )
continue
img1 = cv2.resize(img, (w, h))
imgs.append(img1)
labels.append(idx)
count += 1
print("Finished importing folder %s" % folder)
paths = []
imgsnp = np.asarray(imgs)
labelsnp = np.asarray(labels)
result = [imgsnp, labelsnp]
return result
def importTest2():
# testset path
path = test_path
# resize all pictures to img_width x img_height, subject to adjustment
w = img_width
h = img_height
cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
imgs = []
labels = []
paths = []
count = 0
for idx, folder in enumerate(cate):
for im in glob.glob(folder + '/*.jpg'):
paths.append(im)
paths.sort(key=natural_keys)
for im in paths:
img = cv2.imread(im)
if (img is None):
print('The image:%s cannot be identified.' % im )
continue
img1 = cv2.resize(img, (w, h))
imgs.append(img1)
labels.append(idx)
count += 1
print("Finished importing folder %s" % folder)
paths = []
imgsnp = np.asarray(imgs)
labelsnp = np.asarray(labels)
result = [imgsnp, labelsnp]
return result
# Used to create a CNN model
def createModel1(weights_path=None):
# Following the VGG16 architecture for CNN
model = Sequential()
model.add(ZeroPadding2D((1,1),input_shape=(3,128,128)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1000, activation='softmax'))
if weights_path:
model.load_weights(weights_path)
return model
def createModel2():
model = Sequential()
# The first two layers with 32 filters of window size 7x7
model.add(Conv2D(32, (7, 7), padding='same', activation='relu', input_shape=input_shape))
model.add(Conv2D(32, (7, 7), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (7, 7), padding='same', activation='relu'))
model.add(Conv2D(64, (7, 7), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (7, 7), padding='same', activation='relu'))
model.add(Conv2D(64, (7, 7), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(nClasses, activation='softmax'))
return model
def exportCSV(pred):
with open('predict_result.csv', 'w', newline='') as f:
header = ['id','category']
# input headers name.
writer = csv.DictWriter(f, header)
writer.writeheader()
for i in range(0,len(pred)):
writer.writerow({
'id': i+1,
'category':pred[i]
})
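# e.g. exportCSV(pred) with pred a 1-D sequence of predicted class labels writes
# predict_result.csv with "id,category" rows, ids starting at 1 (illustrative).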
def plot_acc(history):
plt.plot(history.history['acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend('train', loc='upper left')
plt.show()
plt.savefig('Accuracy.png')
# summarize history for loss
def plot_loss(history):
    plt.plot(history.history['loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train'], loc='upper left')
    plt.savefig('Loss.png')  # save before show(), otherwise the saved figure is blank
    plt.show()
#Eliminate corrupted files and counts the number of images for the train set
def openTrain(path):
cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
count = 0
for idx, folder in enumerate(cate):
for im in glob.glob(folder + '/*.jpg'):
img = cv2.imread(im)
if (img is None):
print('The image:%s cannot be identified.' % im)
                newname = im.replace('.jpg', '.txt')  # rename the extension so flow_from_directory skips the corrupted file
os.rename(im, newname)
continue
count += 1
print("Finished reading folder %s" % folder)
print("Total number of uncorrupted images: %d" % count)
return count
#Builds the Datagen, DataGenerator and the Class Dictionary
def build_gen(source):
datagen = ImageDataGenerator(rescale = 1./255, zoom_range=0.3, rotation_range=30, width_shift_range=0.25, height_shift_range=0.25, horizontal_flip=True, vertical_flip=False)
generator = datagen.flow_from_directory(
source,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
class_dictionary = generator.class_indices
return generator, class_dictionary
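# Note (added): class_dictionary maps class names to integer indices, while the model predicts
# indices; a reverse lookup like the sketch below (hypothetical, not in the original script)
# would recover class names from the predicted indices in `pred`:
# index_to_class = {v: k for k, v in class_dictionary.items()}
# predicted_names = [index_to_class[i] for i in pred]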
# MAIN APP STARTS HERE --------------------------------------------------
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = "0"
#session = tf.Session(config=config)
set_session(tf.Session(config=config))
np.random.seed(1337)
#train = importTrain2()
train_path = 'INPUT PATH HERE'
test_path = 'INPUT PATH HERE'
img_height = 244
img_width = 244
NB_IV3_LAYERS_TO_FREEZE = 172
countTrain = openTrain(train_path)
test = importTest2()
#trainImages = train[0]
#trainLabels = train[1]
testImages = test[0]
testLabels = test[1]
#classes = np.unique(trainLabels)
#nClasses = len(classes)
#print(trainImages.shape)
print(testImages.shape)
#nRows, nCols, nDims = trainImages.shape[1:]
#trainData = trainImages.reshape(trainImages.shape[0], nRows, nCols, nDims)
testData = testImages.reshape(testImages.shape[0], img_width, img_height, 3)
input_shape = (img_width, img_height, 3)
# Change to float datatype
#trainData = trainData.astype('float32')
testData = testData.astype('float32')
# Scale the data to lie between 0 to 1
#trainData /= 255
testData /= 255
# Change the labels from integer to categorical data
#trainLabels_onehot = to_categorical(trainLabels)
testLabels_onehot = to_categorical(testLabels)
#model1 = createModel2()
#Creating the base model using a pre-trained InceptionV3 network -----------------------------------------------------------------------------------------
#model = keras.applications.inception_resnet_v2.InceptionResNetV2(include_top=False, weights='imagenet', input_shape=)
model = keras.applications.inception_v3.InceptionV3(include_top=False, weights='imagenet', input_shape=(img_width,img_height,3))
#model = applications.VGG19(weights = "imagenet", include_top=False, input_shape = )
#model1 = keras.applications.inception_resnet_v2.InceptionResNetV2(include_top=False, weights='imagenet', input_tensor=None, input_shape=(128,128,3), pooling=None, classes=nClasses)
#hyperparameters
batch_size = 256
epochs = 50
sPerEpoch = int(float(countTrain)/float(batch_size))
#Freezing layer
for layer in model.layers[:NB_IV3_LAYERS_TO_FREEZE]:
layer.trainable = False
for layer in model.layers[NB_IV3_LAYERS_TO_FREEZE:]:
layer.trainable = True
x = model.output
x = Flatten()(x)
x = Dense(1024, activation="relu")(x)
x = Dropout(0.5)(x)
x = Dense(1024, activation="relu")(x)
predictions = Dense(18, activation="softmax")(x)
# creating the final model
model_final = Model(inputs=model.input, outputs=predictions)
#compiling the model
model_final.compile(loss = "categorical_crossentropy", optimizer = optimizers.SGD(lr=0.0001, momentum=0.9), metrics=["accuracy"])
#model1.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model_final.summary()
#Data Generator ----------------------------------------------------------------------------------------------------------------------------------------------
#Builds the generator
generator, class_dictionary = build_gen(train_path)
# Trains the model
#model1.fit(trainData, trainLabels_onehot, batch_size=batch_size, epochs=epochs, verbose=1)
history = model_final.fit_generator(generator = generator,
steps_per_epoch = sPerEpoch,
epochs = epochs)
#Predicts the output of the model
prob = model_final.predict(testData)
pred = prob.argmax(axis=-1)
print(pred)
exportCSV(pred)
#Saves the model
model_final.save('my_model.h5')
#Plots the accuracy and the loss function of the model
#plots the graphs
plot_loss(history)
plot_acc(history)
#model2.evaluate(test_data, test_labels_one_hot)
``` |
{
"source": "jkchandra/JobPortalPredictiveAnalytics",
"score": 3
} |
#### File: jkchandra/JobPortalPredictiveAnalytics/lookUpReplace.py
```python
import numpy as np
import pandas as pd
import re
import collections
import csv
#import files
skills_synonyms = pd.read_csv("skills_synonyms.csv")
skills_td_cut = pd.read_csv("skills_td_cut.csv")
#Create a Synonym dictionary
skills_synonyms["sub"] = skills_synonyms["sub"].apply(lambda x: x.split(";"))
skills_synonyms.head()
skills_dict = (skills_synonyms.set_index('main').T.to_dict('index'))['sub']
skills_dict
changedWord = []
def findSynonym(value):
for root, synonymArr in skills_dict.items():
for synonym in synonymArr:
if (synonym == value.lower()):
return root
return value
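# Illustrative example (added, with made-up skill names): if skills_dict were
# {'javascript': ['js', 'java script']}, then findSynonym('JS') -> 'javascript', while
# findSynonym('python') -> 'python' (the value is returned unchanged when no synonym matches).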
# For each word in skills_td_cut swap the value of the synonym to the root word
for value in skills_td_cut['word']:
changedWord.append(findSynonym(value))
#overwrite the word with the changed word
skills_td_cut["word"] = changedWord
#write into csv file
skills_td_cut.to_csv("skills_td_cut_syn.csv", index = False)
#Go back to Data
``` |
{
"source": "JKChang2015/Checkio",
"score": 4
} |
#### File: Checkio/Checkio/between_markers.py
```python
def between_markers(text: str, begin: str, end: str) -> str:
"""
returns substring between two given markers
"""
start = text.find(begin) + len(begin) if begin in text else None
stop = text.find(end) if end in text else None
return text[start:stop]
if __name__ == '__main__':
print('Example:')
print(between_markers('What is >apple<', '>', '<'))
# These "asserts" are used for self-checking and not for testing
assert between_markers('What is >apple<', '>', '<') == "apple", "One sym"
assert between_markers("<head><title>My new site</title></head>", "<title>", "</title>") == "My new site", "HTML"
assert between_markers('No[/b] hi', '[b]', '[/b]') == 'No', 'No opened'
assert between_markers('No [b]hi', '[b]', '[/b]') == 'hi', 'No close'
assert between_markers('No hi', '[b]', '[/b]') == 'No hi', 'No markers at all'
assert between_markers('No <hi>', '>', '<') == '', 'Wrong direction'
print('Wow, you are doing pretty good. Time to check it!')
```
#### File: Checkio/Checkio/bigger_price.py
```python
def bigger_price(limit, data):
"""
TOP most expensive goods
"""
res = sorted(data, key=lambda k: k['price'], reverse=True)
return res[:limit]
if __name__ == '__main__':
from pprint import pprint
print('Example:')
pprint(bigger_price(2, [
{"name": "bread", "price": 100},
{"name": "wine", "price": 138},
{"name": "meat", "price": 15},
{"name": "water", "price": 1}
]))
# These "asserts" using for self-checking and not for auto-testing
assert bigger_price(2, [
{"name": "bread", "price": 100},
{"name": "wine", "price": 138},
{"name": "meat", "price": 15},
{"name": "water", "price": 1}
]) == [
{"name": "wine", "price": 138},
{"name": "bread", "price": 100}
], "First"
assert bigger_price(1, [
{"name": "pen", "price": 5},
{"name": "whiteboard", "price": 170}
]) == [{"name": "whiteboard", "price": 170}], "Second"
print('Done! Looks like it is fine. Go and check it')
```
#### File: Checkio/Checkio/date_and_time_convertor.py
```python
def date_time(t):
# replace this for solution
from datetime import datetime
res = datetime.strptime(t, "%d.%m.%Y %H:%M")
if res.hour == 1:
h = res.strftime(' %-H hour')
else:
h = res.strftime(' %-H hours')
if res.minute == 1:
m = res.strftime(' %-M minute')
else:
m = res.strftime(' %-M minutes')
return res.strftime('%-d %B %Y year') + h + m
if __name__ == '__main__':
print("Example:")
print(date_time('01.01.2000 00:00'))
# These "asserts" using only for self-checking and not necessary for auto-testing
assert date_time("01.01.2000 00:00") == "1 January 2000 year 0 hours 0 minutes", "Millenium"
assert date_time("09.05.1945 06:30") == "9 May 1945 year 6 hours 30 minutes", "Victory"
assert date_time("20.11.1990 03:55") == "20 November 1990 year 3 hours 55 minutes", "Somebody was born"
print("Coding complete? Click 'Check' to earn cool rewards!")
```
#### File: Checkio/Checkio/digits_multiplication.py
```python
def checkio(number):
from functools import reduce
l = [int(x) for x in str(number) if x != '0']
return reduce((lambda x, y: x * y), l)
# These "asserts" using only for self-checking and not necessary for auto-testing
if __name__ == '__main__':
assert checkio(123405) == 120
assert checkio(999) == 729
assert checkio(1000) == 1
assert checkio(1111) == 1
print("Coding complete? Click 'Check' to review your tests and earn cool rewards!")
```
#### File: Checkio/Checkio/find_sequence.py
```python
def check(matrix, x, y, dx, dy):
v = matrix[x][y]
for i in range(3):
x += dx
y += dy
if x >= len(matrix) or x < 0:
return False
if y >= len(matrix[0]) or y < 0:
return False
if matrix[x][y] != v:
return False
return True
def checkio(matrix):
for i in range(0, len(matrix)):
for j in range(0, len(matrix[0])):
# horizontally
if check(matrix, i, j, 0, 1):
return True
# vertically
if check(matrix, i, j, 1, 1):
return True
# diagonally NW-SE
if check(matrix, i, j, 1, 0):
return True
# diagonally NE-SW
if check(matrix, i, j, 1, -1):
return True
return False
if __name__ == '__main__':
# These "asserts" using only for self-checking and not necessary for auto-testing
assert checkio([
[1, 2, 1, 1],
[1, 1, 4, 1],
[1, 3, 1, 6],
[1, 7, 2, 5]
]) == True, "Vertical"
assert checkio([
[7, 1, 4, 1],
[1, 2, 5, 2],
[3, 4, 1, 3],
[1, 1, 8, 1]
]) == False, "Nothing here"
assert checkio([
[2, 1, 1, 6, 1],
[1, 3, 2, 1, 1],
[4, 1, 1, 3, 1],
[5, 5, 5, 5, 5],
[1, 1, 3, 1, 1]
]) == True, "Long Horizontal"
assert checkio([
[7, 1, 1, 8, 1, 1],
[1, 1, 7, 3, 1, 5],
[2, 3, 1, 2, 5, 1],
[1, 1, 1, 5, 1, 4],
[4, 6, 5, 1, 3, 1],
[1, 1, 9, 1, 2, 1]
]) == True, "Diagonal"
```
#### File: Checkio/Checkio/fizz_buzz.py
```python
def checkio(number):
if number % 3 == 0 and number % 5 == 0:
return "Fizz Buzz"
elif number % 3 == 0:
return "Fizz"
elif number % 5 == 0:
return "Buzz"
else:
return str(number)
# These "asserts" using only for self-checking and not necessary for auto-testing
if __name__ == '__main__':
assert checkio(15) == "Fizz Buzz", "15 is divisible by 3 and 5"
assert checkio(6) == "Fizz", "6 is divisible by 3"
assert checkio(5) == "Buzz", "5 is divisible by 5"
assert checkio(7) == "7", "7 is not divisible by 3 or 5"
print("Coding complete? Click 'Check' to review your tests and earn cool rewards!")
```
#### File: Checkio/Checkio/index_power.py
```python
def index_power(array, n):
"""
Find Nth power of the element with index N.
"""
if n > len(array) - 1:
return -1
return array[n] ** n
if __name__ == '__main__':
# These "asserts" using only for self-checking and not necessary for auto-testing
assert index_power([1, 2, 3, 4], 2) == 9, "Square"
assert index_power([1, 3, 10, 100], 3) == 1000000, "Cube"
assert index_power([0, 1], 0) == 1, "Zero power"
assert index_power([1, 2], 3) == -1, "IndexError"
print("Coding complete? Click 'Check' to review your tests and earn cool rewards!")
```
#### File: Checkio/Checkio/long_repeat.py
```python
def long_repeat(line):
"""
length the longest substring that consists of the same char
"""
count = ['', 0]
maxCount = 0
for i in list(line):
if i == count[0]:
count[1] += 1
else:
count[0] = i
count[1] = 1
if count[1] > maxCount:
maxCount = count[1]
return maxCount
if __name__ == '__main__':
# These "asserts" using only for self-checking and not necessary for auto-testing
assert long_repeat('sdsffffse') == 4, "First"
assert long_repeat('ddvvrwwwrggg') == 3, "Second"
assert long_repeat('abababaab') == 2, "Third"
assert long_repeat('') == 0, "Empty"
print('"Run" is good. How is "Check"?')
```
#### File: Checkio/Checkio/numbers_factory.py
```python
def checkio(number):
ans = ''
digit = 9
while digit > 1:
if number % digit != 0:
digit -= 1
else:
ans = str(digit) + ans
            number //= digit  # integer division keeps number an int and the comparison exact
if number == 1:
return int(ans)
else:
return 0
if __name__ == '__main__':
#These "asserts" using only for self-checking and not necessary for auto-testing
assert checkio(20) == 45, "1st example"
assert checkio(21) == 37, "2nd example"
assert checkio(17) == 0, "3rd example"
assert checkio(33) == 0, "4th example"
assert checkio(3125) == 55555, "5th example"
assert checkio(9973) == 0, "6th example"
```
#### File: Checkio/Checkio/say_hi.py
```python
def say_hi(name, age):
"""
Hi!
"""
return ('Hi. My name is %s and I\'m %d years old' % (name, age))
if __name__ == '__main__':
# These "asserts" using only for self-checking and not necessary for auto-testing
assert say_hi("Alex", 32) == "Hi. My name is Alex and I'm 32 years old", "First"
assert say_hi("Frank", 68) == "Hi. My name is Frank and I'm 68 years old", "Second"
print('Done. Time to Check.')
```
#### File: Checkio/Checkio/sun_angle.py
```python
def sun_angle(time):
t = time.split(':')
hour, mini = int(t[0]), int(t[1])
if hour < 6 or hour > 18:
return "I don't see the sun!"
else:
n = hour - 6 + (mini / 60)
return n * 180 / 12
if __name__ == '__main__':
print("Example:")
print(sun_angle("07:00"))
# These "asserts" using only for self-checking and not necessary for auto-testing
assert sun_angle("07:00") == 15
assert sun_angle("01:23") == "I don't see the sun!"
assert sun_angle("18:01") == "I don't see the sun!"
print("Coding complete? Click 'Check' to earn cool rewards!")
```
#### File: Checkio/Checkio/time_converter_12_to_24.py
```python
def time_converter(time):
t, sign = time.split(' ')
hour, mini = t.split(':')
if sign == 'a.m.':
if int(hour) < 10:
return '0' + t
elif hour == '12':
return '00' + ':' + mini
else:
            return t  # 10 and 11 a.m. are already in 24-hour form, so drop the 'a.m.' suffix
else:
if hour == '12':
return t
else:
return str(int(hour) + 12) + ':' + mini
if __name__ == '__main__':
print("Example:")
print(time_converter('12:30 p.m.'))
# These "asserts" using only for self-checking and not necessary for auto-testing
assert time_converter('12:30 p.m.') == '12:30'
assert time_converter('9:00 a.m.') == '09:00'
assert time_converter('11:15 p.m.') == '23:15'
print("Coding complete? Click 'Check' to earn cool rewards!")
``` |
{
"source": "JKChang2015/Machine_Learning",
"score": 3
} |
#### File: axe/KNN/KNN.py
```python
# KNN
# Created by JKChang
# 27/01/2020, 15:53
# Tag:
# Description:
import operator
import matplotlib.pyplot as plt
from numpy import *
def createDataSet():
group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])
labels = ['A', 'A', 'B', 'B']
return group, labels
def drawGraph(group, labels):
for i, label in enumerate(labels):
x = group[i][0]
y = group[i][1]
plt.scatter(x, y, marker='x', color='red')
plt.text(x + 0.01, y + 0.01, label, fontsize=9)
plt.show()
def classify(newInput, dataSet, labels, k):
dataSetSize = dataSet.shape[0]
## step 1: calculate Euclidean distance
# tile(A, reps): Construct an array by repeating A reps times
# the following copy numSamples rows for dataSet
    diffMat = tile(newInput, (dataSetSize, 1)) - dataSet  # Subtract element-wise
sqDiffMat = diffMat ** 2 # squared for the subtract
sqDistances = sqDiffMat.sum(axis=1) # sum is performed by row
distances = sqDistances ** 0.5
## step 2: sort the distance
    # argsort() returns the indices that would sort an array in ascending order
sortedDistIndicies = distances.argsort()
classCount = {}
for i in range(k):
## step 3: choose the min k distance
voteIlabel = labels[sortedDistIndicies[i]]
## step 4: count the times labels occur
# when the key voteLabel is not in dictionary classCount, get() will return 0
classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
## step 5: the max voted class will return
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
group, labels = createDataSet()
drawGraph(group, labels)
print(classify([0.5, 0.5], group, labels, 2))
```
#### File: Yudi TANG/KNN/knn.py
```python
import numpy as np
import pandas as pd
features = [
'accommodates', 'bedrooms', 'bathrooms', 'beds', 'price', 'minimum_nights',
'maximum_nights', 'number_of_reviews'
]
dc_listings = pd.read_csv('listings.csv')[features]
dc_listings['price'] = dc_listings['price'].str.replace(r'\$|,', '', regex=True).astype(float)
train_df = dc_listings.copy().iloc[:2792]
test_df = dc_listings.copy().iloc[2792:]
def predict_price(new_listing_value, feature_column):
print(new_listing_value)
    temp_df = train_df.copy()  # work on a copy so the global training frame is not modified
    temp_df['difference'] = np.abs(temp_df[feature_column] - new_listing_value)  # absolute feature difference
temp_df = temp_df.sort_values('difference') # sort according to the difference
knn_5 = temp_df.price.iloc[:5]
predicted_price = knn_5.mean() # mean value of 5 nearest ones
return (predicted_price)
test_df['predicted_price'] = test_df.accommodates.apply(predict_price, feature_column='accommodates')
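# A possible next step (added sketch, not in the original script): quantify prediction quality
# on the held-out rows, e.g. with mean absolute error and RMSE on the existing columns:
# mae = np.abs(test_df['predicted_price'] - test_df['price']).mean()
# rmse = np.sqrt(((test_df['predicted_price'] - test_df['price']) ** 2).mean())
# print(mae, rmse)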
``` |
{
"source": "JKChang2015/semantic_dev_tech_test",
"score": 3
} |
#### File: semantic_dev_tech_test/Exercises/wine.py
```python
from owlready2 import get_ontology
from owlready2 import sync_reasoner
# List all grape growing regions (in the ontology)
def get_all_regions():
'''
Get all grape growing regions
:return: List of grape growing regions
'''
onto = get_ontology('file://./wine.owl').load()
with onto:
sync_reasoner()
return [x.name for x in onto.region.instances()]
# List all varietals (in the ontology)
def get_all_varietals():
'''
Get all grape varietals, include also_called
:return: List of grape varietals
'''
onto = get_ontology('file://./wine.owl').load()
with onto:
sync_reasoner()
subs = onto.varietal.descendants()
subs.remove(onto.varietal)
res = [x.name for x in subs]
for c in subs:
if len(c.also_called) > 0:
res += c.also_called
return res
# List all types (classes) of wine (in the ontology)
def get_all_types():
'''
List all types of wine
:return: List of wine types
'''
onto = get_ontology('file://./wine.owl').load()
with onto:
sync_reasoner()
subs = onto.color.descendants()
subs.remove(onto.color)
return [x.name for x in subs]
# Query for wine types and individual wines by: colour, varietal, region
def query(color=None, varietal=None, region=None):
'''
:param color: wine color
:param varietal: grape varietal
:param region: region
:return: types and individuals by given colour, varietal and region
'''
onto = get_ontology('file://./wine.owl').load()
with onto:
sync_reasoner()
wines = onto.wine.descendants()
wines.remove(onto.wine)
res = list(wines.copy())
for wine in wines:
if color:
wine_c = [x.name.lower() for x in wine.has_color]
if color.lower() not in wine_c:
res.remove(wine)
continue
if varietal:
wine_v = [x.name.lower() for x in wine.made_from]
if varietal.lower() not in wine_v:
res.remove(wine)
continue
if region:
wine_r = [x.name.lower() for x in wine.grown_in]
if region.lower() not in wine_r:
res.remove(wine)
continue
for i in res:
try:
if len(i.instances()) > 0:
inst = i.instances()
res += inst
except:
pass
return list(set([x.name for x in res]))
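# Hypothetical usage sketch (added); the color/varietal/region names shown here depend on the
# contents of wine.owl and are not guaranteed to exist in the ontology:
# print(get_all_regions())
# print(query(color='red', varietal='merlot'))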
``` |
{
"source": "jkchen2/JshBot",
"score": 3
} |
#### File: JshBot/jshbot/parser.py
```python
import discord
import re
from discord.abc import PrivateChannel
from jshbot import commands, utilities, logger
from jshbot.commands import ArgTypes
from jshbot.exceptions import ConfiguredBotException, BotException
CBException = ConfiguredBotException('Parser')
def split_parameters(parameters, include_quotes=False, quote_list=False):
"""Splits up the given parameters by spaces and quotes.
Keyword arguments:
include_quotes -- The quotes attached to the parameters will be included.
quote_list -- Gets a list of indices that represent parameters that were
grouped because of quotes.
"""
if not parameters:
if quote_list:
return ([], [])
else:
return []
split = re.split('( +)', parameters)
quoted_indices = []
joined_split = []
add_start = -1
add_end = -1
for index, entry in enumerate(split):
if entry.startswith('"'):
add_start = index
if (entry.endswith('"') and not entry.endswith('\\"') and
len(entry) > 1 and add_start != -1):
add_end = index + 1
if add_start == -1: # Add entry normally
joined_split.append(entry)
elif add_end != -1: # Join entries in quotes
quoted_indices.append(len(joined_split))
combined = ''.join(split[add_start:add_end])
if include_quotes:
joined_split.append(combined)
else:
joined_split.append(combined[1:-1])
add_start = -1
add_end = -1
if add_start != -1: # Unclosed quote
logger.warn("Detected an unclosed quote: " + split[add_start])
joined_split.append(''.join(split[add_start:index + 1]))
if quote_list:
return (joined_split, quoted_indices)
else:
return joined_split
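# Example of split_parameters behavior (added comment): the whitespace runs are kept as separate
# list elements so callers can take parameters[::2] for the stripped values. For instance,
# split_parameters('add "hello world" now') should yield
# ['add', ' ', 'hello world', ' ', 'now'], and with quote_list=True the second element of the
# returned tuple would be [2], the index of the quote-grouped parameter.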
async def match_subcommand(bot, command, parameters, message, match_closest=False):
"""Matches the given parameters to a valid subcommand from the command.
Returns a tuple of the subcommand, options, and arguments.
If match_closest is True, returns the closest matching subcommand or None.
No processing (conversion, checking) is done, and returns only the subcommand or None.
"""
parameters, quoted_indices = split_parameters(parameters, quote_list=True)
closest_index = -1
closest_index_matches = 0
closest_index_error = None
stripped_parameters = parameters[::2]
for subcommand in command.subcommands:
current_index = 0
matches = 0
options = {}
arguments = []
last_opt_index = -1
arg_index = -1
used_opts = []
exhausted_opts = len(subcommand.opts) == 0
not_found_error = None
while current_index < len(stripped_parameters):
current = stripped_parameters[current_index]
if not exhausted_opts: # Check opts
if current_index * 2 in quoted_indices: # Quoted elements are always arguments
exhausted_opts = True
found_opt = subcommand.opts.get(current.lower(), None)
if not exhausted_opts and found_opt:
if subcommand.strict_syntax: # Check strict syntax
if found_opt.index < last_opt_index: # Syntax out of order
exhausted_opts = True
else:
last_opt_index = found_opt.index
if not exhausted_opts:
if found_opt.name in options: # Duplicate. Skip to args
exhausted_opts = True
else: # Check for attached argument
if found_opt.attached: # Required attached argument
if current_index + 1 >= len(stripped_parameters):
not_found_error = (
'Option {opt.name_string} requires an attached parameter, '
'{opt.attached_string}.'.format(opt=found_opt))
matches += 3
else:
current_index += 1
options[found_opt.name] = stripped_parameters[current_index]
matches += 6
else: # No attached argument required
options[found_opt.name] = None
matches += 5
used_opts.append(found_opt)
else: # Option not found. Skip to args
exhausted_opts = True
if exhausted_opts: # No more matching opts - check for optional opts
current_index -= 1 # Search args where we left off
remaining_opts = [o for o in subcommand.opts.values() if o not in used_opts]
for opt in remaining_opts:
if opt.optional:
matches += 1
if opt.always_include:
options[opt.name] = opt.default
else: # Not optional. Unfit subcommand
not_found_error = 'Option {} is required.'.format(opt.name_string)
break
else: # Check args
arg_index += 1
if arg_index >= len(subcommand.args): # Too many arguments
not_found_error = 'Too many arguments.'
else:
matches += 1
arg = subcommand.args[arg_index]
if arg.argtype in (ArgTypes.SINGLE, ArgTypes.OPTIONAL):
arguments.append(current)
else: # Instant finish grouped arguments
if arg.argtype in (ArgTypes.SPLIT, ArgTypes.SPLIT_OPTIONAL):
arguments += stripped_parameters[current_index:]
else: # Merged
split_arguments = []
quote_index = current_index * 2
for segment in parameters[current_index * 2:]:
if quote_index in quoted_indices: # Add quotes back in
split_arguments.append('"{}"'.format(segment))
else:
split_arguments.append(segment)
quote_index += 1
arguments += [''.join(split_arguments)]
break
if not_found_error: # Skip rest of loop and evaluate matches
break
current_index += 1
# Finished opt/arg while loop
if not not_found_error and not exhausted_opts: # Opts remain
remaining_opts = [o for o in subcommand.opts.values() if o not in used_opts]
for opt in remaining_opts:
if opt.optional:
matches += 1
if opt.always_include:
options[opt.name] = opt.default
else: # Not optional. Unfit subcommand
not_found_error = 'Option {} is required.'.format(opt.name_string)
break
if not not_found_error and arg_index < len(subcommand.args) - 1: # Optional arguments
arg_index += 1
arg = subcommand.args[arg_index]
if arg.argtype is ArgTypes.OPTIONAL:
matches += 1
while (arg and arg.argtype is ArgTypes.OPTIONAL and
arg_index < len(subcommand.args)):
arguments.append(arg.default)
arg_index += 1
try:
arg = subcommand.args[arg_index]
except:
arg = None
if arg and arg.argtype in (ArgTypes.SPLIT_OPTIONAL, ArgTypes.MERGED_OPTIONAL):
matches += 1
arguments.append(arg.default)
elif arg:
not_found_error = 'No value given for argument {}.'.format(arg.help_string)
if not not_found_error: # Check for message attachment
if subcommand.attaches:
if message.attachments or subcommand.attaches.optional:
matches += 6
else:
not_found_error = 'Missing attachment **__`{name}`__**'.format(
name=subcommand.attaches.name)
elif message.attachments: # No attachment argument, but attachment was provided
not_found_error = 'No attachment required, but one was given.'
if not_found_error: # Find closest subcommand
if matches > closest_index_matches:
closest_index = subcommand.index
closest_index_matches = matches
closest_index_error = not_found_error
else: # Subcommand found. Convert and check
if subcommand.confidence_threshold is not None: # Confidence threshold
if closest_index_matches >= subcommand.confidence_threshold:
continue # Skip valid match due to low confidence
# No additional processing
if match_closest:
if matches <= 1 and matches < closest_index_matches: # No confidence
continue
else:
return subcommand
# Cannot match parameters in a direct message if disabled
elif not subcommand.allow_direct and isinstance(message.channel, PrivateChannel):
return subcommand, {}, []
# Fill in options and arguments
else:
for option_name, value in options.items(): # Check options
current_opt = subcommand.opts[option_name]
if value is not None:
new_value = await current_opt.convert_and_check(bot, message, value)
if new_value is not None:
options[option_name] = new_value
for index, pair in enumerate(zip(subcommand.args, arguments)): # Check arguments
arg, value = pair
if (value is not None
or arg.argtype in (ArgTypes.SINGLE, ArgTypes.SPLIT, ArgTypes.MERGED)):
if arg.argtype not in (ArgTypes.SINGLE, ArgTypes.OPTIONAL):
new_values = await arg.convert_and_check(
bot, message, arguments[index:])
arguments = arguments[:index] + new_values
break
else:
new_value = await arg.convert_and_check(bot, message, value)
arguments[index] = new_value
return subcommand, options, arguments
# Looped through all subcommands. Not found
if closest_index == -1 or closest_index_matches <= 1: # Low confidence
guess = command
else:
guess = command.subcommands[closest_index]
if match_closest:
return guess
else:
if isinstance(guess, commands.SubCommand):
syntax_error = 'Invalid syntax: {}'.format(closest_index_error)
else:
guess = command
syntax_error = 'Invalid syntax.'
invoker = utilities.get_invoker(bot, guild=message.guild)
raise CBException(
syntax_error, embed_fields=guess.help_embed_fields, embed_format={'invoker': invoker})
async def fill_shortcut(bot, shortcut, parameters, message):
parameters = split_parameters(parameters, include_quotes=True)
stripped_parameters = parameters[::2]
arguments_dictionary = {}
current_index = -1
for current_index, current in enumerate(stripped_parameters):
if current_index >= len(shortcut.args):
invoker = utilities.get_invoker(bot, guild=message.guild)
raise CBException(
"Too many arguments.", embed_fields=shortcut.help_embed_fields,
embed_format={'invoker': invoker})
else:
arg = shortcut.args[current_index]
if arg.argtype in (ArgTypes.SINGLE, ArgTypes.OPTIONAL):
arguments_dictionary[arg.name] = current
else: # Instant finish grouped arguments
if arg.argtype in (ArgTypes.SPLIT, ArgTypes.SPLIT_OPTIONAL):
arguments_dictionary[arg.name] = ''.join(stripped_parameters[current_index:])
else: # Merged
arguments_dictionary[arg.name] = ''.join(parameters[current_index * 2:])
break
# TODO: TEST THIS!
logger.debug("Finished shortcut loop. %s", arguments_dictionary)
if current_index < len(shortcut.args) - 1: # Check for optional arguments
arg = shortcut.args[current_index + 1]
if arg.argtype is ArgTypes.OPTIONAL:
while (arg and arg.argtype is ArgTypes.OPTIONAL and
current_index < len(shortcut.args)):
arguments_dictionary[arg.name] = '' if arg.default is None else arg.default
current_index += 1
try:
arg = shortcut.args[current_index]
except:
arg = None
if arg and arg.argtype in (ArgTypes.SPLIT_OPTIONAL, ArgTypes.MERGED_OPTIONAL):
arguments_dictionary[arg.name] = '' if arg.default is None else arg.default
elif arg:
invoker = utilities.get_invoker(bot, guild=message.guild)
raise CBException(
"Not enough arguments.", embed_fields=shortcut.help_embed_fields,
embed_format={'invoker': invoker})
logger.debug("Finished checking for optional arguments. %s", arguments_dictionary)
for arg in shortcut.args:
value = arguments_dictionary[arg.name]
if value is not None:
new_value = await arg.convert_and_check(bot, message, value)
arguments_dictionary[arg.name] = new_value
return shortcut.replacement.format(**arguments_dictionary).strip()
async def parse(bot, command, parameters, message):
"""Parses the parameters and returns a tuple.
This matches the parameters to a subcommand.
The tuple is (base, subcommand_index, options, arguments).
"""
parameters = parameters.strip() # Safety strip
if isinstance(command, commands.Shortcut): # Fill replacement string
logger.debug("Filling shortcut...")
parameters = await fill_shortcut(bot, command, parameters, message)
command = command.command # command is actually a Shortcut. Not confusing at all
logger.debug("Shortcut filled to: [%s]", parameters)
subcommand, options, arguments = await match_subcommand(bot, command, parameters, message)
return (subcommand, options, arguments)
# return (command, subcommand.index, options, arguments, command.keywords)
async def guess_command(
bot, text, message, safe=True, substitute_shortcuts=True, suggest_help=True):
"""Guesses the closest command or subcommand.
Keyword arguments:
safe -- Returns None if no command was guessed
substitute_shortcuts -- Fills in the shortcut (if found) and guesses a command from that
suggest_help -- Suggests that the user run the regular help command
"""
if not text:
if safe:
return None
else:
raise CBException("No guess text.")
text = text.strip()
split_content = text.split(' ', 1)
if len(split_content) == 1:
split_content.append('')
base, parameters = split_content
base = base.lower()
try:
command = bot.commands[base]
except KeyError:
if safe:
return None
else:
if suggest_help:
invoker = utilities.get_invoker(bot, message=message)
additional = ' To see the menu, type `{}help`'.format(invoker)
else:
additional = ''
raise CBException("Invalid base.{}".format(additional))
if isinstance(command, commands.Shortcut) and substitute_shortcuts:
try:
parameters = await fill_shortcut(bot, command, parameters, message)
command = command.command
except BotException:
return command.command
if not parameters or isinstance(command, commands.Shortcut):
return command
else:
return await match_subcommand(bot, command, parameters, message, match_closest=True)
```
#### File: JshBot/jshbot/utilities.py
```python
import asyncio
import datetime
import functools
import io
import os
import shutil
import socket
import time
import zipfile
import aiohttp
import discord
from urllib.parse import urlparse
from psycopg2.extras import Json
from jshbot import data, configurations, core, logger
from jshbot.exceptions import BotException, ConfiguredBotException
CBException = ConfiguredBotException('Utilities')
# Voice region time offsets (no DST)
VOICE_REGIONS = {
'us-west': -8,
'us-east': -5,
'us-south': -6,
'us-central': -6,
'eu-west': 1, # West European Summer Time
'eu-central': 2, # Central European Summer Time
'singapore': 8,
'london': 0,
'sydney': 10,
'amsterdam': 2, # CEST
'frankfurt': 2, # CEST
'brazil': -3,
'vip-us-east': -5,
'vip-us-west': -8,
'vip-amsterdam': 2 # CEST
}
# Integer to emoji conversion
NUMBER_EMOJIS = [
':zero:', ':one:', ':two:', ':three:', ':four:', ':five:',
':six:', ':seven:', ':eight:', ':nine:', ':keycap_ten:'
]
class BaseConverter():
def __init__(self):
self.error_reason = "Unknown conversion error"
def get_convert_error(self, *args):
return self.error_reason
class MemberConverter(BaseConverter):
def __init__(self, server_only=True, live_check=None, attribute=None):
self.server_only = server_only
self.live_check = live_check
self.attribute = attribute
super().__init__()
async def __call__(self, bot, message, value, *a):
if self.live_check:
self.server_only = self.live_check(bot, message, value, *a)
guild = message.guild if self.server_only else None
try:
return await data.fetch_member(
bot, value, guild=guild, strict=self.server_only, attribute=self.attribute)
except BotException as e:
self.set_error_reason(e, 'member')
def set_error_reason(self, error, convert_type):
if error.error_details.startswith('Duplicate'):
pre_format = "Duplicate {}s found.".format(convert_type)
else:
pre_format = "{} '{}' not found.".format(convert_type.title(), error.other_details)
self.error_reason = pre_format + ' Please use a mention or raw user ID.'
assert False # To trigger the conversion error
class ChannelConverter(MemberConverter):
def __init__(self, server_only=True, live_check=None, constraint=None, attribute=None):
"""Constraint can be used to specify only text or voice channels.
The constraitn can either be discord.VoiceChannel or discord.TextChannel
"""
self.server_only = server_only
self.constraint = constraint
super().__init__(live_check=live_check, attribute=attribute)
def __call__(self, bot, message, value, *a):
if self.live_check:
guild = self.live_check(bot, message, value, *a)
else:
guild = message.guild if self.server_only else None
try:
return data.get_channel(
bot, value, guild=guild, strict=self.server_only,
constraint=self.constraint, attribute=self.attribute)
except BotException as e:
self.set_error_reason(e, 'channel')
class RoleConverter(MemberConverter):
def __init__(self, attribute=None):
super().__init__(attribute=attribute)
def __call__(self, bot, message, value, *a):
try:
return data.get_role(bot, value, message.guild, attribute=self.attribute)
except BotException as e:
self.set_error_reason(e, 'role')
class PercentageConverter(BaseConverter):
def __init__(self, accuracy=3):
self.accuracy = int(accuracy)
super().__init__()
def __call__(self, bot, message, value, *a):
cleaned = value.strip('%')
try:
converted = float(cleaned)
except:
raise CBException("Must be a percentage.")
else:
if self.accuracy is not None:
converted = round(converted, self.accuracy)
return converted/100
class HexColorConverter(BaseConverter):
def __call__(self, bot, message, value, *a):
try:
return discord.Color(int(value.lower()[-6:], 16))
except:
raise CBException("Invalid hex color.")
def add_bot_permissions(bot, plugin_name, **permissions):
"""Adds the given permissions to the bot for authentication generation."""
dummy = discord.Permissions()
for permission in permissions:
try:
getattr(dummy, permission.lower())
except: # Permission not found
raise CBException("Permission '{}' does not exist".format(permission))
current = data.get(
bot, plugin_name, 'permissions', create=True, volatile=True)
if current is None:
data.add(bot, plugin_name, 'permissions', permissions, volatile=True)
def get_permission_bits(bot):
"""Calculates all of the permissions for each plugin."""
dummy = discord.Permissions()
for plugin in bot.plugins.keys():
for permission in data.get(
bot, plugin, 'permissions', volatile=True, default={}):
setattr(dummy, permission.lower(), True)
return dummy.value
async def can_interact(bot, member, channel_id=None):
"""Checks that the given member can be interacted with.
This ensures that the user is:
Not a bot
Not blocked in the server
Additionally, if the user is a member (guild exists):
Not in a blocked channel
Not blacklisted by the botowners
If given a channel ID, also checks that the bot is not muted in there
This also checks for maintenace mode
"""
if data.is_owner(bot, member.id):
return True
elif member.bot or member.id in data.get(bot, 'core', 'blacklist', default=[]):
return False
elif bot.maintenance_mode:
return False
# Guild specific check
guild = getattr(member, 'guild', None)
if guild:
if data.is_mod(bot, member=member):
return True
guild_data = data.get(bot, 'core', None, guild.id, default={})
if (guild_data.get('muted', False) or
(channel_id in guild_data.get('muted_channels', [])) or
(member.id in guild_data.get('blocked', []))):
return False
return True
async def download_url(
bot, url, headers={'User-Agent': 'Mozilla/5.0'},
include_name=False, extension=None, filename=None, use_fp=False):
"""Asynchronously downloads the given file to the temp folder.
Returns the path of the downloaded file. If include_name is True, returns
a tuple of the file location and the file name.
If use_fp, this will use a BytesIO object instead of downloading to a file.
"""
if use_fp:
fp = io.BytesIO()
else:
if not filename:
filename = get_cleaned_filename(url, extension=extension)
file_location = '{0}/temp/{1}'.format(bot.path, filename)
try:
response_code, downloaded_bytes = await get_url(bot, url, get_bytes=True, headers=headers)
if response_code != 200:
raise CBException("Failed to download file.", response_code)
if use_fp:
fp.write(downloaded_bytes)
fp.seek(0)
return fp
else:
with open(file_location, 'wb') as download:
download.write(downloaded_bytes)
if include_name:
return (file_location, filename)
else:
return file_location
except Exception as e:
raise CBException("Failed to download the file.", e=e)
def delete_temporary_file(bot, filename, safe=True):
"""Deletes the given file from the temp folder."""
try:
os.remove('{0}/temp/{1}'.format(bot.path, filename))
except Exception as e:
if not safe:
raise CBException("File could not be deleted.", e=e)
def get_temporary_file(bot, filename, safe=True):
"""Gets the filename from the temp folder."""
test_path = '{0}/temp/{1}'.format(bot.path, filename)
if os.path.isfile(test_path):
return test_path
elif safe:
return None
else:
raise CBException("Temporary file not found.")
def add_temporary_file(bot, bytes_io, filename, seek=True, overwrite=True, safe=False):
"""Dumps the binary file into the temp folder."""
test_path = '{0}/temp/{1}'.format(bot.path, filename)
if os.path.isfile(test_path) and not overwrite and not safe:
raise CBException("Temporary file already exists.")
else:
try:
if seek and bytes_io.seekable():
bytes_io.seek(0)
write_type = 'w' if isinstance(bytes_io, io.StringIO) else 'wb'
with open(test_path, write_type) as temp_file:
temp_file.write(bytes_io.read())
except Exception as e:
if not safe:
raise CBException("Failed to write temporary file.", e=e)
def get_plugin_file(bot, filename, safe=True):
"""Gets the plugin file in the plugin_data directory."""
test_path = '{0}/plugins/plugin_data/{1}'.format(bot.path, filename)
if os.path.isfile(test_path):
return test_path
elif safe:
return None
else:
raise CBException("Plugin file '{}' not found.".format(filename))
def valid_url(url):
"""Checks that the given URL is Discord embed friendly. Or at least, it tries."""
def _valid_string(segment, main=True):
if not len(segment):
return False
for c in [ord(it.lower()) for it in segment]:
if not (97 <= c <= 122 or (main and (48 <= c <= 57 or c == 45))):
return False
return True
test = urlparse(url)
if not (test.scheme and test.netloc and '.' in test.netloc):
return False
# Discord only accepts http or https
if test.scheme not in ('http', 'https'):
return False
# Test for valid netloc
netloc_split = test.netloc.split('.')
if (len(netloc_split) < 2):
return False # http://foo
tld = test.netloc.split('.')[-1]
if not (len(tld) >= 2 and _valid_string(tld, main=False)):
return False # http://foo.123
for segment in netloc_split[:-1]:
if not _valid_string(segment):
return False # http://foo..bar or http://fo*o.bar
for c in url:
if not 33 <= ord(c) <= 126:
return False # non-ASCII only URLs
return True
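# Examples (added comment): valid_url('https://example.com/page') should return True, while
# valid_url('ftp://example.com') (non-http scheme) and valid_url('http://foo') (no TLD)
# should both return False.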
async def get_url(bot, urls, headers={}, read_response=True, get_bytes=False):
"""Uses aiohttp to asynchronously get a url response, or multiple."""
async def fetch(url, read_method='text'):
if not url: # Why
return (None, None)
async with session.get(str(url)) as response:
return (
response.status,
(await getattr(response, read_method)()) if read_response else response)
read_method = 'read' if get_bytes else 'text'
try:
async with aiohttp.ClientSession(headers=headers, loop=bot.loop) as session:
if isinstance(urls, (list, tuple)):
result = await parallelize(fetch(url, read_method) for url in urls)
else:
result = await fetch(urls, read_method)
return result
except Exception as e:
raise CBException("Failed to retrieve a URL.", e=e)
async def request(
bot, method, url, session_kwargs={}, method_kwargs={},
response_method='text', response_method_kwargs={}):
"""Wraps aiohttp methods for making a request."""
async with aiohttp.ClientSession(**session_kwargs) as session:
async with getattr(session, method)(url, **method_kwargs) as response:
return (response, await getattr(response, response_method)(**response_method_kwargs))
async def upload_to_discord(bot, fp, filename=None, rewind=True, close=False):
"""Uploads the given file-like object to the upload channel.
If the upload channel is specified in the configuration files, files
will be uploaded there. Otherwise, a new guild will be created, and
used as the upload channel."""
channel_id = configurations.get(bot, 'core', 'upload_channel')
if not channel_id: # Check to see if a guild was already created
channel_id = data.get(bot, 'core', 'upload_channel')
channel = data.get_channel(bot, channel_id, safe=True)
# TODO: Remove. Guild creation via bots is a whitelisted process
if channel is None: # Create guild
logger.debug("Creating guild for upload channel...")
try:
guild = await bot.create_guild('uploads')
except Exception as e:
raise CBException(
"Failed to create upload guild. This bot is not whitelisted "
"to create guilds.", e=e)
data.add(bot, 'core', 'upload_channel', guild.id)
channel = bot.get_channel(guild.id)
if channel is None: # Shouldn't happen
raise CBException("Failed to get upload channel.")
try:
discord_file = discord.File(fp, filename=filename)
message = await channel.send(file=discord_file)
upload_url = message.attachments[0].url
except Exception as e:
raise CBException("Failed to upload file.", e=e)
try:
if close:
fp.close()
elif rewind:
fp.seek(0)
except:
pass
return upload_url
async def upload_logs(bot):
"""Uploads any log files to the debug channel."""
log_zip_location = '{0}/temp/debug_log_files.zip'.format(bot.path)
log_zip_file = zipfile.ZipFile(log_zip_location, mode='w')
log_location = '{0}/temp/debug_logs.txt'.format(bot.path)
compression = zipfile.ZIP_DEFLATED
if os.path.exists(log_location):
log_zip_file.write(
log_location, arcname=os.path.basename(log_location),
compress_type=compression)
for log_number in range(5):
next_location = log_location + '.{}'.format(log_number + 1)
if os.path.exists(next_location):
log_zip_file.write(
next_location, arcname=os.path.basename(next_location),
compress_type=compression)
log_zip_file.close()
debug_channel = bot.get_channel(configurations.get(bot, 'core', 'debug_channel'))
discord_file = discord.File(log_zip_location, filename='all_logs.zip')
await debug_channel.send(content='All logs:', file=discord_file)
async def parallelize(coroutines, return_exceptions=False, propagate_error=False):
"""Uses asyncio.gather to "parallelize" the coroutines (not really)."""
try:
return await asyncio.gather(*coroutines, return_exceptions=return_exceptions)
except Exception as e:
if propagate_error:
raise e
else:
raise CBException("Failed to await coroutines.", e=e)
def future(function, *args, **kwargs):
"""Returns the given function as a future."""
loop = asyncio.get_event_loop()
function = functools.partial(function, *args, **kwargs)
return loop.run_in_executor(None, function)
# TODO: Deprecate in favor of clean_text
def get_cleaned_filename(name, cleaner=False, limit=200, extension=None):
"""Cleans up the filename to a limited set of ASCII characters."""
if extension:
extension = '.{}'.format(extension)
limit -= len(extension)
else:
extension = ''
cleaned_list = []
for char in name:
if cleaner: # Does not include underscores or dashes
if char.isalnum():
cleaned_list.append(char)
else:
if char.isalnum() or ord(char) in (95, 45):
cleaned_list.append(char)
if len(cleaned_list) > limit: # Because Windows file limitations
cleaned_list = cleaned_list[:limit]
return ''.join(cleaned_list).lower() + extension
def clean_text(text, level=2, limit=200, custom=None, lowercase=True):
"""Cleans up the text to a limited set of ASCII characters.
level 0: Standard ASCII characters or alphanumeric unicode
level 1: Alphanumeric (unicode) or dash, underscore, space
level 2: Alphanumeric (unicode) or dash, underscore (default)
level 3: Alphanumeric (unicode) only
level 4: Alphanumeric (ASCII) only
"""
if custom:
sifter = custom
else:
sifter = (
lambda x: x if (x.isalnum() or 32 <= ord(x) <= 126) else '',
lambda x: x if (x.isalnum() or ord(x) in (95, 45, 32)) else '',
lambda x: x if (x.isalnum() or ord(x) in (95, 45)) else '',
lambda x: x if x.isalnum() else '',
lambda x: x if (x.isalnum() and ord(x) < 127) else ''
)[level]
cleaned = ''.join(sifter(char) for char in text[:limit])
return cleaned.lower() if lowercase else cleaned
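# Example (added comment): with the default level 2, clean_text('My File (v2).txt') should
# return 'myfilev2txt' -- spaces and punctuation other than '-' and '_' are stripped and the
# result is lowercased.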
def filter_everyone(text):
"""Removes mentionable instances of @everyone and @here."""
return text.replace('@everyone', '@\u200beveryone').replace('@here', '@\u200bhere')
def get_player(bot, guild_id):
"""Gets the voice player on the given guild. None otherwise."""
return data.get(bot, 'core', 'voice_player', guild_id=guild_id, volatile=True)
def set_player(bot, guild_id, player):
"""Sets the voice player of the given guild."""
data.add(bot, 'core', 'voice_player', player, guild_id=guild_id, volatile=True)
async def join_and_ready(bot, voice_channel, is_mod=False, reconnect=False):
"""Joins the voice channel and stops any audio playing.
Returns the voice_client object from voice_channel.connect()
"""
guild = voice_channel.guild
muted_channels = data.get(bot, 'core', 'muted_channels', guild_id=guild.id, default=[])
if voice_channel == guild.afk_channel:
raise CBException("This is the AFK channel.")
if voice_channel.id in muted_channels and not is_mod:
raise CBException("The bot is muted in this voice channel.")
if reconnect:
try:
await stop_audio(bot, guild)
except:
pass
voice_client = guild.voice_client
if not voice_client:
try:
voice_client = await asyncio.wait_for(
voice_channel.connect(timeout=5.0, reconnect=False),
timeout=10.0, loop=bot.loop)
except asyncio.TimeoutError as e:
try:
await stop_audio(bot, guild, force=True)
except:
pass
raise CBException("Timed out trying to join the voice channel.")
except Exception as e:
try:
await stop_audio(bot, guild)
except:
pass
raise CBException("Failed to join the voice channel.", e=e)
if voice_client.is_playing():
voice_client.stop()
else:
if voice_client.is_playing():
voice_client.stop()
if voice_client.channel != voice_channel:
try:
await voice_client.move_to(voice_channel)
except Exception as e:
try:
await stop_audio(bot, guild)
except:
pass
raise CBException("Failed to move to the voice channel.", e=e)
return voice_client
async def stop_audio(bot, guild, member=None, safe=True, disconnect=True, force=False):
"""Stops any playing audio.
Keyword arguments:
member -- Checks that the the bot is connected to the member's
voice channel. The safe option overrides this.
safe -- Prevents exceptions from being thrown. Can be seen as 'silent'.
disconnect -- Disconnects from the voice channel.
force -- If disconnect is set, forces the disconnect.
"""
voice_client = guild.voice_client
if not voice_client:
if safe:
return
else:
raise CBException("Bot not connected to a voice channel.")
member_voice = member.voice.channel if member and member.voice else None
if member and voice_client.channel != member_voice:
if not safe:
raise CBException("Bot not connected to your voice channel.")
else:
voice_client.stop()
if disconnect:
await voice_client.disconnect(force=force)
async def play_and_leave(bot, guild, audio_source, delay=30):
"""Plays the audio source, and then leaves the voice channel.
If the delay is negative, the bot will not leave the voice channel.
"""
voice_client = guild.voice_client
if voice_client is None:
raise CBException("Voice client is missing.")
async def _leave():
await asyncio.sleep(delay)
test_voice_client = guild.voice_client
if not test_voice_client or test_voice_client.source != audio_source:
logger.debug("Voice client changed. Automatic disconnect cancelled.")
else:
try:
await voice_client.disconnect()
except Exception as e:
raise CBException("Failed to disconnect from the voice channel.", e=e)
def _start_leave(error):
if error:
raise CBException("Player failed to finish.", error)
elif delay >= 0:
asyncio.ensure_future(_leave(), loop=bot.loop)
voice_client.play(audio_source, after=_start_leave)
def get_time_string(total_seconds, text=False, full=False, resolution=2):
"""Gets either digital-clock-like time or time in plain English."""
total_seconds = int(total_seconds)
values = [
#('weeks', int(total_seconds / 604800)), # Weeks are more confusing than days
('days', int(total_seconds / 86400)),
('hours', int((total_seconds % 86400) / 3600)),
('minutes', int((total_seconds % 3600) / 60)),
('seconds', int(total_seconds % 60))
]
result = []
if text:
for scale, value in values:
if value > 0:
if not full and len(result) == 1 and values[0][1] >= 7:
break # Lower resolution if there are several days already
result.append('{} {}{}'.format(
value, scale[:-1], '' if value == 1 else 's'))
if not full and len(result) >= resolution:
break
for it in range(len(result) - 2):
result.insert((it * 2) + 1, ', ')
if len(result) > 1:
result.insert(-1, ' and ')
else:
for scale, value in values:
if value > 0 or full or scale == 'minutes':
if scale in ('hours', 'minutes', 'seconds') and full:
format_string = '{:0>2}'
else:
format_string = '{}'
result.append(format_string.format(value))
full = True
return ('' if text else ':').join(result)
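# Examples (added comment): get_time_string(3661) should return '1:01:01', and
# get_time_string(3661, text=True) should return '1 hour and 1 minute' (remaining units are
# dropped once the default resolution of 2 is reached).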
def get_formatted_message(message):
"""Gets a log-friendly format of the given message."""
if message.edited_at:
edited = ' (edited {})'.format(message.edited_at)
else:
edited = ''
if message.attachments:
urls = [attachment.url for attachment in message.attachments]
attached = ' (attached {})'.format(urls)
else:
attached = ''
return ("{0.author.name}#{0.author.discriminator} ({0.author.id}) "
"at {0.created_at}{1}{2}:\r\n\t{0.content}").format(
message, edited, attached)
async def get_log_text(bot, channel, **log_arguments):
"""Wrapper function for Carter's time machine."""
messages = []
async for message in channel.history(**log_arguments):
messages.append(message)
return '\r\n\r\n'.join(get_formatted_message(message) for message in reversed(messages))
async def send_text_as_file(channel, text, filename, extra=None, extension='txt'):
"""Sends the given text as a text file."""
discord_file = discord.File(
get_text_as_file(text), filename='{}.{}'.format(filename, extension))
reference = await channel.send(content=extra, file=discord_file)
return reference
def get_text_as_file(text):
"""Converts the text into a bytes object using BytesIO."""
try:
return io.BytesIO(bytes(str(text), 'utf-8'))
except Exception as e:
raise CBException("Failed to convert text to a file.", e=e)
def get_invoker(bot, guild=None, message=None):
"""Gets a suitable command invoker for the bot.
If a guild is specified, this will check for a custom invoker and
whether or not mention mode is enabled.
If a message is specified, this will obtain a guild as long as the message
was not sent in a private channel.
"""
if message and isinstance(message.channel, discord.TextChannel):
guild = message.guild
if guild:
guild_data = data.get(
bot, 'core', None, guild_id=guild.id, default={})
if guild_data.get('mention_mode', False):
invoker = '{} '.format(guild.me.display_name)
else:
invoker = guild_data.get('command_invoker', None)
else:
invoker = None
if invoker is None:
invoker = bot.command_invokers[0]
return invoker
async def notify_owners(bot, message, user_id=None):
"""Sends all owners a direct message with the given text.
If user_id is specified, this will check that the user is not in the
blacklist.
"""
if bot.selfbot:
logger.info("Owner notification:\n{}".format(message))
else:
if user_id:
blacklist = data.get(bot, 'core', 'blacklist', default=[])
if user_id in blacklist:
await asyncio.sleep(0.5)
return
for owner in bot.owners:
try:
user = await bot.fetch_user(owner)
if len(message) > 1990:
await send_text_as_file(user, message, 'notification')
else:
await user.send(message)
except Exception as e:
logger.error("Failed to notify owner %s: %s", owner, e)
def docker_send_command(command):
"""Sends the database Docker container a command."""
logger.debug("Sending database container command: %s", command)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.1)
s.connect(('db', 2345))
s.send(bytes(command, 'ascii'))
s.close()
def docker_receive_exit_code():
"""Waits until an exit code is returned from the database Docker container."""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(30)
s.bind(('', 2345))
s.listen(1)
connection, _ = s.accept()
response = connection.recv(64)
connection.close()
s.close()
logger.debug("Database container response: %s", response)
return response
# TODO: Add specific table dumping
def db_backup(bot, safe=True):
"""Use the Docker setup to backup the database."""
if not bot.docker_mode:
return
try:
logger.debug("Attemping to connect to the database container...")
if bot.dump_exclusions:
exclusions = '-T "' + '" -T "'.join(bot.dump_exclusions) + '"'
else:
exclusions = ''
command = (
'pg_dump -U postgres -F c {} postgres > '
'/external/data/db_dump'.format(exclusions))
docker_send_command(command)
logger.debug("Told database container to backup")
except Exception as e:
logger.warn("Failed to communicate with the database container: %s", e)
if safe:
return
raise CBException("Failed to communicate with the database container.", e=e)
# Read response code from database container
try:
return docker_receive_exit_code()
except Exception as e:
logger.warn("Failed to receive a response from the database container: %s", e)
if safe:
return
raise CBException("Failed to receive a response from the database container.", e=e)
def make_backup(bot):
"""Makes a backup of the data directory."""
logger.info("Making backup...")
db_backup(bot)
backup_indices = '{0}/temp/backup{{}}.zip'.format(bot.path)
if os.path.isfile(backup_indices.format(5)):
os.remove(backup_indices.format(5))
for it in range(1, 5):
backup_file_from = backup_indices.format(5-it)
backup_file_to = backup_indices.format(6-it)
if os.path.isfile(backup_file_from):
os.rename(backup_file_from, backup_file_to)
shutil.make_archive(backup_indices.format(1)[:-4], 'zip', '{}/data'.format(bot.path))
logger.info("Finished making backup.")
def restore_backup(bot, backup_file):
"""Restores a backup file given the backup filename."""
logger.info("Restoring from a backup file...")
try:
core.bot_data = {'global_users': {}, 'global_plugins': {}}
core.volatile_data = {'global_users': {}, 'global_plugins': {}}
shutil.unpack_archive(backup_file, '{}/data'.format(bot.path))
data.check_all(bot)
data.load_data(bot)
except Exception as e:
raise CBException("Failed to extract backup.", e=e)
logger.info("Finished data restore.")
def restore_db_backup(bot, tables=[]):
"""Restores a database dump backup file.
If tables is specified, this will restore those instead of the entire database.
"""
logger.info("Restoring database...")
    try:
        if tables:
            specific_tables = '-t "' + '" -t "'.join(tables) + '"'
        else:
            specific_tables = ''
        command = 'pg_restore -U postgres -d postgres {} /external/temp/db_dump'.format(specific_tables)
        docker_send_command(command)
        exit_code = docker_receive_exit_code()
    except Exception as e:
        raise CBException("Failed to restore backup.", e=e)
    logger.info("Finished database restore.")
    return exit_code
def get_timezone_offset(bot, guild_id=None, utc_dt=None, utc_seconds=None, as_string=False):
"""Converts the time to a guild's (guessed) local time.
Keyword arguments:
guild_id -- Retrieves the configured timezone of the given guild, or
guesses it based on the voice server region.
utc_dt -- A timezone-naive datetime object that gets shifted by the offset.
utc_seconds -- An integer value that gets shifted by the offset.
as_string -- The UTC offset is returned as a UTC+X string instead of an integer value.
    If either utc_dt or utc_seconds is specified, the return type will be a tuple of two
elements. The first element is the offset value, the second element is the
shifted datetime object or seconds value.
"""
if guild_id is None:
offset = 0
else:
offset = data.get(bot, 'core', 'timezone', guild_id=guild_id)
if offset is None:
guild = bot.get_guild(guild_id)
offset = VOICE_REGIONS.get(str(guild.region), 0)
if 'us-' in str(guild.region): # Apply DST offset
if utc_dt and utc_dt.dst():
in_dst = utc_dt.timetuple().tm_isdst > 0
else:
in_dst = time.localtime(time.time()).tm_isdst > 0
if in_dst:
offset += 1
if as_string:
result = 'UTC{}'.format(('+' + str(offset)) if offset >= 0 else offset)
else:
result = offset
if utc_dt: # Convert UTC datetime object to "local" time
return (result, utc_dt + datetime.timedelta(hours=offset))
if utc_seconds is not None: # Convert UTC seconds to offset
return (result, utc_seconds + (3600 * offset))
else:
return result
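# Illustrative usage (editor's addition; the guild and offsets are hypothetical):
#   get_timezone_offset(bot, guild_id=guild.id, as_string=True)
#       -> 'UTC-5'
#   offset, local_dt = get_timezone_offset(
#       bot, guild_id=guild.id, utc_dt=datetime.datetime.utcnow())
#       -> offset == -5 and local_dt shifted back 5 hours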
def get_schedule_entries(
bot, plugin_name, search=None, destination=None, custom_match=None, custom_args=[]):
"""Gets the entries given the search or match arguments."""
if custom_match:
where_arg = custom_match
input_args = custom_args
else:
where_arg = 'plugin = %s'
input_args = [plugin_name]
if search is not None:
where_arg += ' AND search = %s'
input_args.append(search)
if destination is not None:
where_arg += ' AND destination = %s'
input_args.append(destination)
cursor = data.db_select(
bot, from_arg='schedule', where_arg=where_arg,
additional='ORDER BY time ASC', input_args=input_args, safe=False)
return cursor.fetchall()
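# Editor's usage sketch (the destination value is hypothetical): fetch this
# plugin's pending entries for a given channel, soonest first:
#   entries = get_schedule_entries(
#       bot, __name__, destination='c{}'.format(channel.id))
#   for entry in entries:
#       logger.debug("%s fires at %s", entry.function, entry.time)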
def remove_schedule_entries(
bot, plugin_name, search=None, destination=None, custom_match=None, custom_args=[]):
"""Removes the entries given the search or match arguments."""
if custom_match:
where_arg = custom_match
input_args = custom_args
else:
where_arg = 'plugin = %s'
input_args = [plugin_name]
if search is not None:
where_arg += ' AND search = %s'
input_args.append(search)
if destination is not None:
where_arg += ' AND destination = %s'
input_args.append(destination)
return data.db_delete(bot, 'schedule', where_arg=where_arg, input_args=input_args)
def update_schedule_entries(
bot, plugin_name, search=None, destination=None, function=None,
payload=None, new_search=None, new_time=None, new_destination=None,
info=None, custom_match=None, custom_args=[]):
"""Updates the schedule entry with the given fields.
If any field is left as None, it will not be changed.
If custom_match is given, it must be a proper WHERE SQL clause. Otherwise
it will look for a direct match with search.
Returns the number of entries modified.
"""
if custom_match:
where_arg = custom_match
input_args = custom_args
else:
where_arg = 'plugin = %s'
input_args = [plugin_name]
if search is not None:
where_arg += ' AND search = %s'
input_args.append(search)
if destination is not None:
where_arg += ' AND destination = %s'
input_args.append(destination)
set_args = []
set_input_args = []
if function:
set_args.append('function=%s')
set_input_args.append(function.__name__)
if payload:
set_args.append('payload=%s')
set_input_args.append(Json(payload))
if new_time is not None:
set_args.append('time=%s')
set_input_args.append(int(new_time))
if new_search is not None:
set_args.append('search=%s')
set_input_args.append(new_search)
if new_destination:
set_args.append('destination=%s')
set_input_args.append(new_destination)
if info is not None:
set_args.append('info=%s')
set_input_args.append(info)
set_arg = ', '.join(set_args)
input_args = set_input_args + input_args
data.db_update(bot, 'schedule', set_arg=set_arg, where_arg=where_arg, input_args=input_args)
asyncio.ensure_future(_start_scheduler(bot))
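# Editor's usage sketch (the search value is hypothetical): reschedule all of
# this plugin's 'reminder' entries to fire an hour from now:
#   update_schedule_entries(
#       bot, __name__, search='reminder', new_time=time.time() + 3600)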
def schedule(
bot, plugin_name, scheduled_time, function, payload=None,
search=None, destination=None, info=None):
"""Adds the entry to the schedule table and starts the timer.
    The function cannot be a lambda; it must be a coroutine defined at the top
    level of the plugin. scheduled_time should be a Unix timestamp in seconds.
    The asynchronous function should take 8 arguments:
bot -- An instance of the bot.
scheduled_time -- Time at which the given function should be called.
payload -- Same as the keyword argument.
search -- Same as the keyword argument.
destination -- Same as the keyword argument.
late -- Whether or not the function was called late due to bot downtime.
info -- Same as the keyword argument.
id -- Unique ID assigned to the entry when it was created. Usually unused.
Keyword arguments:
payload -- Standard json-serializable dictionary
search -- Used to assist in later deletion or modification
destination -- Starts with either a 'c' or 'u', then the ID of the channel or user
This is used to help determine what will need to be messaged.
info -- Used as a description for the scheduled event if `!base notifications` is used.
"""
input_args = [
int(scheduled_time),
plugin_name,
function.__name__,
Json(payload),
search,
destination,
info
]
data.db_insert(bot, 'schedule', input_args=input_args, safe=False)
asyncio.ensure_future(_start_scheduler(bot))
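# Example callback and registration (editor's illustration; the callback name,
# payload, and destination are hypothetical). The callback must be a top-level
# coroutine in the plugin and match the 8-argument signature described above:
#
#   async def _reminder_notification(
#           bot, scheduled_time, payload, search, destination, late, info, id):
#       messageable = get_messageable(bot, destination)
#       await messageable.send(payload['text'])
#
#   schedule(
#       bot, __name__, time.time() + 3600, _reminder_notification,
#       payload={'text': 'Reminder!'}, search='reminder',
#       destination='u{}'.format(user.id), info='One-hour reminder')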
def get_messageable(bot, destination):
"""Takes a destination in the schedule table format and returns a messageable."""
try:
if destination[0] == 'u': # User
get = bot.get_user
elif destination[0] == 'c': # Channel
get = bot.get_channel
else:
raise CBException("Must be either a user `u` or channel `c`.")
return get(int(destination[1:]))
except Exception as e:
raise CBException("Invalid destination format.", e=e)
async def _schedule_timer(bot, entry, delay):
task_comparison = bot.schedule_timer
await asyncio.sleep(0.5)
logger.debug("Scheduler sleeping for %s seconds...", delay)
await asyncio.sleep(delay)
if task_comparison is not bot.schedule_timer:
logger.debug("_schedule_timer was not cancelled! Cancelling this scheduler...")
return
if int(time.time() + 1) < entry.time:
logger.warn("_schedule_timer was about to delete the entry early! Restarting loop...")
asyncio.ensure_future(_start_scheduler(bot))
return
try:
deleted = data.db_delete(
bot, 'schedule', where_arg='id=%s', input_args=[entry.id], safe=False)
    except Exception as e:
        logger.warn("_schedule_timer failed to delete a schedule entry. %s", e)
        deleted = False
if deleted:
try:
logger.debug("_schedule_timer done sleeping for %s seconds!", delay)
function = getattr(bot.plugins[entry.plugin], entry.function)
late = delay < -60
asyncio.ensure_future(function(
bot, entry.time, entry.payload, entry.search,
entry.destination, late, entry.info, entry.id))
except Exception as e:
logger.warn("Failed to execute scheduled function: %s", e)
asyncio.ensure_future(_start_scheduler(bot))
async def _start_scheduler(bot):
"""Starts the interal scheduler."""
await bot.wait_until_ready()
if bot.schedule_timer: # Scheduler already running
bot.schedule_timer.cancel()
bot.schedule_timer = None
cursor = data.db_select(
bot, from_arg='schedule', additional='ORDER BY time ASC', limit=1, safe=False)
result = cursor.fetchone()
if result:
delta = result.time - time.time()
logger.debug("Starting scheduled event %s", result.id)
bot.schedule_timer = asyncio.ensure_future(_schedule_timer(bot, result, delta))
else:
logger.debug("No pending scheduled event available.")
``` |
{
"source": "jkchen2/JshBot-plugins",
"score": 2
} |
#### File: JshBot-plugins/data_converter/data_converter.py
```python
import discord
from jshbot import utilities, data, configurations, plugins, logger
from jshbot.exceptions import BotException, ConfiguredBotException
from jshbot.commands import (
Command, SubCommand, Shortcut, ArgTypes, Attachment, Arg, Opt, MessageTypes, Response)
__version__ = '0.1.0'
CBException = ConfiguredBotException('0.3 to 0.4 plugin')
@plugins.command_spawner
def get_commands(bot):
return [Command('convertdata', hidden=True, elevated_level=3)]
async def get_response(bot, context):
for guild in bot.guilds:
convert_core(bot, guild)
if 'tags.py' in bot.plugins:
convert_tags(bot, guild)
return Response("Converted.")
def convert_core(bot, guild):
if data.get(bot, 'core', None, guild_id=guild.id):
logger.warn("Guild %s (%s) already had core converted", guild.name, guild.id)
return
base_data = data.get(bot, 'base', None, guild_id=guild.id, default={})
if 'disabled' in base_data:
# TODO: Iterate through toggled commands
pass
if 'blocked' in base_data:
replacement = []
for entry in base_data['blocked']:
replacement.append(int(entry))
base_data['blocked'] = replacement
if 'muted_channels' in base_data:
replacement = []
for entry in base_data['muted_channels']:
replacement.append(int(entry))
base_data['muted_channels'] = replacement
if 'moderators' in base_data:
del base_data['moderators']
if base_data:
for key, value in base_data.items():
data.add(bot, 'core', key, value, guild_id=guild.id)
data.remove(bot, 'base', None, guild_id=guild.id)
def convert_tags(bot, guild):
if not data.get(bot, 'tags.py', 'tags', guild_id=guild.id):
logger.warn("Guild %s (%s) already had tags converted", guild.name, guild.id)
return
tags = data.get(bot, 'tags.py', 'tags', guild_id=guild.id, default={})
add_tag = bot.plugins['tags.py']._add_tag
#key,value,length,volume,name,flags,author,hits,created,last_used,last_used_by,complex,extra
for key, tag in tags.items():
to_insert = [
key, # key
tag['value'], # value
tag['length'], # length
tag['volume'], # volume
tag['name'], # name
tag['flags'], # flags
int(tag['author']), # author
tag['hits'], # hits
int(tag['created']), # created
int(tag['last_used']), # last_used
None, # last_used_by
{}, # complex
{} # extra
]
add_tag(bot, to_insert, guild.id)
data.remove(bot, 'tags.py', 'tags', guild_id=guild.id, safe=True)
```
#### File: JshBot-plugins/no_awoo/no_awoo.py
```python
import time
import random
import unicodedata
import re
import discord
from jshbot import utilities, configurations, plugins, data, logger
from jshbot.exceptions import ConfiguredBotException
from jshbot.commands import (
Command, SubCommand, Shortcut, ArgTypes, Attachment, Arg, Opt, MessageTypes, Response)
__version__ = '0.1.4'
CBException = ConfiguredBotException('Awoo police')
uses_configuration = True
statements = None
substitutions = None
fine = None
BASIC_MATCH = re.compile(r'\ba+w+oo+\b')
ADVANCED_MATCH = re.compile(r'\ba+[a\s]*w+[w\s]*o\s*o+(\b|[\sise])')
PLEA_MATCH = re.compile(r'legali[zs]e *a+w+oo+')
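# Editor's note -- strings each pattern is intended to catch (run against the
# lowercased and, for the advanced check, normalized message content):
#   BASIC_MATCH    plain howls such as "awoo" or "aawooo"
#   ADVANCED_MATCH spaced or suffixed bypasses such as "a w o o" or "awooing"
#   PLEA_MATCH     pleas such as "legalize awoo" or "legalise awoo"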
@plugins.command_spawner
def get_commands(bot):
return [Command(
'awoo', subcommands=[
SubCommand(doc='Get fined.', allow_direct=False, function=awoo),
SubCommand(
Opt('stats'),
Arg('user', argtype=ArgTypes.MERGED_OPTIONAL,
convert=utilities.MemberConverter(server_only=False)),
doc='See how much money you or the given user owes.',
function=awoo_stats),
SubCommand(
Opt('leaderboard'),
doc='See the list of worst offenders.',
function=awoo_leaderboard),
SubCommand(
Opt('toggle'),
Arg('channel', argtype=ArgTypes.SPLIT_OPTIONAL,
convert=utilities.ChannelConverter(constraint=discord.TextChannel),
doc='Toggles detection in this channel.'),
doc='Toggles awoo detection.',
function=awoo_toggle, elevated_level=1),
SubCommand(
Opt('whitelist'),
Arg('user', argtype=ArgTypes.MERGED_OPTIONAL,
convert=utilities.MemberConverter()),
doc='Whitelist users from detection.',
function=awoo_whitelist, elevated_level=1),
SubCommand(
Opt('reset'),
Arg('user', argtype=ArgTypes.MERGED, convert=utilities.MemberConverter()),
function=awoo_reset, elevated_level=3)],
shortcuts=[
Shortcut(
'astats', 'stats {arguments}',
Arg('arguments', argtype=ArgTypes.MERGED_OPTIONAL)),
Shortcut('aleaderboard', 'leaderboard')
],
description='Consult the criminal database.')]
@plugins.db_template_spawner
def get_templates(bot):
return {
'awoo_template': (
"user_id bigint UNIQUE,"
"debt decimal,"
"violations integer,"
"sneaky integer")
}
@plugins.on_load
def setup_awoo_table(bot):
data.db_create_table(bot, 'awoo', template='awoo_template')
user_index = 'IX_awoo_order'
if not data.db_exists(bot, user_index):
data.db_execute(bot, 'CREATE INDEX {} ON awoo (debt DESC)'.format(user_index))
async def awoo(bot, context):
if not _awoo_check(bot, context.message): # User has been whitelisted. Force a violation
await _violation_notification(bot, context.message, 1)
async def awoo_stats(bot, context):
"""Pulls stats on the given user."""
user = context.arguments[0] or context.author
cursor = data.db_select(bot, from_arg='awoo', where_arg='user_id=%s', input_args=[user.id])
entry = cursor.fetchone() if cursor else None
if not entry:
raise CBException(
'{} has not made a single awoo violation. What a good cookie.'.format(user.mention))
embed = discord.Embed(title=':scales: Awoo violation statistics', description=user.mention)
embed.add_field(name='Debt', value='${}'.format(entry.debt))
embed.add_field(name='Violations', value='{}'.format(entry.violations))
return Response(embed=embed)
async def awoo_leaderboard(bot, context):
"""Displays the top 10 violators."""
cursor = data.db_select(bot, from_arg='awoo', additional='ORDER BY debt DESC', limit=10)
entries = cursor.fetchall() if cursor else []
if not entries:
raise CBException("Nobody has made any awoo violations yet!")
stats = [[], []] # debt/violations, user
for index, entry in enumerate(entries):
stats[0].append('`{0}.` ${1.debt} | {1.violations}'.format(index + 1, entry))
user = await data.fetch_member(bot, entry.user_id, safe=True, attribute='mention')
user = user or 'Unknown ({})'.format(entry.user_id)
stats[1].append('`\u200b`{}'.format(user))
embed = discord.Embed(title=':scales: Awoo violation leaderboard')
embed.add_field(name='Debt | Violations', value='\n'.join(stats[0]))
embed.add_field(name='User', value='\n'.join(stats[1]))
return Response(embed=embed)
async def awoo_toggle(bot, context):
"""Toggles awoo detection for either the guild or the given channel."""
guild_awoo_data = data.get(
bot, __name__, None, guild_id=context.guild.id, default={}, create=True)
# Channel
if context.arguments[0]:
changes = []
for channel in context.arguments:
if channel.id in guild_awoo_data.get('disabled_channels', []):
action = 'is now'
data.list_data_remove(
bot, __name__, 'disabled_channels',
value=channel.id, guild_id=context.guild.id)
else:
action = 'is no longer'
data.list_data_append(
bot, __name__, 'disabled_channels', channel.id, guild_id=context.guild.id)
changes.append('{} {} being monitored.'.format(channel.mention, action))
return Response(content='\n'.join(changes))
# Guild
else:
guild_awoo_data['enabled'] = not guild_awoo_data.get('enabled', False)
return Response(content='Detection is now {}abled'.format(
'en' if guild_awoo_data['enabled'] else 'dis'))
async def awoo_whitelist(bot, context):
"""(De)whitelists the given user."""
user = context.arguments[0]
whitelist = data.get(bot, __name__, 'whitelist', guild_id=context.guild.id, default=[])
# (De)whitelist user
if user:
if user.id in whitelist:
action = 'removed from'
data.list_data_remove(bot, __name__, 'whitelist', value=user.id, guild_id=context.guild.id)
else:
action = 'added to'
data.list_data_append(bot, __name__, 'whitelist', user.id, guild_id=context.guild.id)
return Response(content="User {} the whitelist.".format(action))
# Show whitelisted users
else:
if not whitelist:
raise CBException("There are no whitelisted users.")
users = [
(
(await data.fetch_member(bot, it, attribute='mention', safe=True)) or
'Unknown ({})'.format(it)
) for it in whitelist]
return Response(
embed=discord.Embed(title="Whitelisted users", description=', '.join(users)))
async def awoo_reset(bot, context):
"""Removes the given user from the database."""
user = context.arguments[0]
removed = data.db_delete(bot, 'awoo', where_arg='user_id=%s', input_args=[user.id])
if not removed:
raise CBException("User not in violation database.")
return Response(content="User removed from the database.")
def _awoo_check(bot, message, show_filtered=''):
"""
Checks for awoo violations.
Tier 1: Standard match
Tier 2: Bypass attempt match
Tier 3: Legalization plea
"""
# Initial content check
content = show_filtered or (message.clean_content.lower() if message.content else '')
author, channel = message.author, message.channel
if not content or author.bot or isinstance(channel, discord.abc.PrivateChannel):
return
# Ignore muted guilds, channels, and users
guild_data = data.get(bot, 'core', None, message.guild.id, default={})
if (guild_data.get('muted', False) or
channel.id in guild_data.get('muted_channels', []) or
author.id in guild_data.get('blocked', [])):
return
# Ignore disabled guilds, disabled channels and whitelisted users
guild_awoo_data = data.get(bot, __name__, None, guild_id=message.guild.id, default={})
if (not guild_awoo_data.get('enabled', False) or
channel.id in guild_awoo_data.get('disabled_channels', []) or
author.id in guild_awoo_data.get('whitelist', [])):
return
# Tier 3: Legalization plea
if PLEA_MATCH.search(content):
return 3
# Tier 1: Basic check
if BASIC_MATCH.search(content):
return 1
# Tier 2: Advanced check
filtered = content
for key, values in substitutions:
for value in values:
filtered = filtered.replace(value, key)
_check = lambda c: c.isalpha() or c.isspace()
filtered = ''.join(c.lower() for c in unicodedata.normalize('NFKD', filtered) if _check(c))
if ADVANCED_MATCH.search(filtered):
return 2
# Debug
if show_filtered:
return filtered
async def _violation_notification(bot, message, awoo_tier, send_message=True):
"""
Logs the violation and (optionally) sends the user a notification.
Standard notification: once per violation, up to 1 time
None: 2 violations
Silence notification: 1 violation
Reset period for notifications is 1 minute.
Stress indicates a number of users making a violation within a 60 second period.
Tier 1: 3 members
Tier 2: 5 members
Tier 3: 8 members
"""
author, channel = message.author, message.channel
current_time = time.time()
violation_data = data.get(
bot, __name__, 'user_violation', user_id=author.id, volatile=True)
channel_violation_data = data.get(
bot, __name__, 'channel_violation', channel_id=channel.id, volatile=True)
if not violation_data or current_time - violation_data['time'] >= 60:
violation_data = {'time': 0, 'violations': 0}
data.add(bot, __name__, 'user_violation', violation_data, user_id=author.id, volatile=True)
if not channel_violation_data or current_time - channel_violation_data['time'] >= 60:
channel_violation_data = {'time': 0, 'violators': set(), 'sent_tier': 0}
data.add(
bot, __name__, 'channel_violation', channel_violation_data,
channel_id=channel.id, volatile=True)
violation_data['violations'] += 1
violation_data['time'] = current_time
channel_violation_data['violators'].add(author.id)
channel_violation_data['time'] = current_time
# Update table
set_arg = 'debt = debt+%s, violations = violations+1'
if awoo_tier == 2:
set_arg += ', sneaky = sneaky+1'
cursor = data.db_select(bot, from_arg='awoo', where_arg='user_id=%s', input_args=[author.id])
entry = cursor.fetchone() if cursor else None
if entry:
data.db_update(
bot, 'awoo', set_arg=set_arg, where_arg='user_id=%s', input_args=[fine, author.id])
else:
data.db_insert(bot, 'awoo', input_args=[author.id, fine, 1, 1 if awoo_tier == 2 else 0])
# Add a snarky message depending on the tier
if awoo_tier == 2: # Attempted bypass
snark = random.choice(statements['bypass']) + '\n'
elif awoo_tier == 3: # Legalization plea
snark = random.choice(statements['legalize']) + '\n'
else:
snark = ''
# Notify user
logger.debug("Violations: %s", violation_data['violations'])
text = ''
if violation_data['violations'] <= 1:
text = "{}{} has been fined ${} for an awoo violation.".format(snark, author.mention, fine)
elif violation_data['violations'] == 4:
text = "{} {}".format(author.mention, random.choice(statements['silence']))
elif awoo_tier == 3 and violation_data['violations'] <= 3: # Legalization plea, but silent
text = snark
if send_message and text:
await channel.send(content=text)
else:
await message.add_reaction(random.choice(['🚩', '🛑', '❌', '⛔', '🚫']))
# Stress
violators, sent_tier = channel_violation_data['violators'], channel_violation_data['sent_tier']
if (len(violators) == 3 and sent_tier == 0 or
len(violators) == 5 and sent_tier == 1 or
len(violators) == 8 and sent_tier == 2):
if send_message:
await message.channel.send(random.choice(statements['stress'][sent_tier]))
channel_violation_data['sent_tier'] += 1
@plugins.listen_for('on_message')
async def check_awoo_messages(bot, message):
awoo_tier = _awoo_check(bot, message)
if awoo_tier: # Awoo detected
await _violation_notification(bot, message, awoo_tier)
@plugins.listen_for('on_message_edit')
async def check_awoo_edits(bot, message_before, message_after):
if _awoo_check(bot, message_before): # Prevent a little edit abuse
return
awoo_tier = _awoo_check(bot, message_after)
if awoo_tier:
await _violation_notification(bot, message_after, awoo_tier, send_message=False)
@plugins.listen_for('bot_on_ready_boot')
async def setup_globals(bot):
global statements, substitutions, fine
statements = configurations.get(bot, __name__, extra='statements', extension='json')
substitutions = configurations.get(bot, __name__, extra='substitutions', extension='json')
fine = configurations.get(bot, __name__, 'fine')
```
#### File: JshBot-plugins/playlist/playlist.py
```python
import random
import asyncio
import time
import math
import yaml
import discord
from urllib.parse import urlparse
from collections import OrderedDict, deque
from psycopg2.extras import Json
from datetime import datetime
from enum import Enum, IntEnum
from youtube_dl import YoutubeDL
from tinytag import TinyTag
from jshbot import utilities, configurations, data, plugins, logger
from jshbot.exceptions import ConfiguredBotException, BotException
from jshbot.commands import (
Command, SubCommand, Shortcut, ArgTypes, Attachment, Arg, Opt, MessageTypes, Response)
__version__ = '0.3.12'
CBException = ConfiguredBotException('Music playlist')
uses_configuration = True
TITLE_LIMIT = 50 # Track title character limit in the track explorer
URL_LIMIT = 140 # Track URL limit to be displayed in the track explorer
MIRROR_TIMER = 60 # Chat mirror timer in seconds
class States(IntEnum):
PLAYING, PAUSED, STOPPED, LOADING = range(4)
class Modes(IntEnum):
PLAYLIST, QUEUE = range(2)
class Control(IntEnum):
ALL, PARTIAL, DJS = range(3)
@plugins.command_spawner
def get_commands(bot):
max_threshold = configurations.get(bot, __name__, key='max_threshold')
max_cutoff = configurations.get(bot, __name__, key='max_cutoff')
max_user_track_limit = configurations.get(bot, __name__, key='max_user_track_limit')
max_total_track_limit = configurations.get(bot, __name__, key='max_total_track_limit')
async def check_whitelist(bot, context):
config = configurations.get(bot, __name__)
if config['use_whitelist'] and context.guild.id not in config['whitelist']:
raise CBException("This server is not in the music player whitelist.")
new_commands = []
new_commands.append(Command(
'playlist', subcommands=[
SubCommand(
Opt('tracks'), doc='View the entire playlist', function=format_tracklist),
SubCommand(
Opt('import'),
Opt('youtube', attached='url', optional=True, quotes_recommended=False),
Attachment('tracklist file', optional=True),
doc='Adds the tracks in the attached tracklist file, '
'or from the YouTube playlist link. Only DJs can import '
'tracks to prevent abuse.',
function=import_tracklist),
SubCommand(
Opt('info'),
Arg('track number', quotes_recommended=False, convert=int),
doc='Retrieves the song information of the given track number.',
function=get_info),
SubCommand(
Opt('add'),
Arg('query', argtype=ArgTypes.MERGED),
doc='Adds a song to the playlist. Can either be a URL to a supported site '
'(YouTube, Bandcamp, SoundCloud, etc.) or a YouTube search query',
function=add_track),
SubCommand(
Opt('remove'),
Arg('track number', quotes_recommended=False, convert=int),
doc='Removes the given track number from the playlist.',
function=remove_track),
SubCommand(
Opt('volume'),
Arg('percent', quotes_recommended=False,
convert=utilities.PercentageConverter(),
check=lambda b, m, v, *a: 0.01 <= v <= 1.0,
check_error='Must be between 1% and 100% inclusive.'),
doc='Sets the player volume to the given percentage.',
function=set_volume),
SubCommand(
Opt('configure'),
Opt('threshold', attached='seconds', optional=True, group='options',
quotes_recommended=False, convert=int,
check=lambda b, m, v, *a: 10 <= v <= max_threshold,
check_error='Must be between 10 and {} seconds.'.format(max_threshold)),
Opt('cutoff', attached='seconds', optional=True, group='options',
quotes_recommended=False, convert=int,
check=lambda b, m, v, *a: 10 <= v <= max_cutoff,
check_error='Must be between 10 and {} seconds.'.format(max_cutoff)),
Opt('usertracks', attached='limit', optional=True, group='options',
quotes_recommended=False, convert=int,
check=lambda b, m, v, *a: 0 <= v <= max_user_track_limit,
check_error='Must be between 0 and {}.'.format(max_user_track_limit),
doc='Limits the number of tracks users can add to the player. 0 for no limit'),
Opt('totaltracks', attached='limit', optional=True, group='options',
quotes_recommended=False, convert=int,
check=lambda b, m, v, *a: 0 <= v <= max_total_track_limit,
check_error='Must be between 0 and {}.'.format(max_total_track_limit),
doc='Limits the total number of tracks for the player. 0 for no limit'),
Opt('djrole', attached='role', optional=True, group='options',
convert=utilities.RoleConverter()),
Opt('channel', attached='text channel', optional=True, group='options',
quotes_recommended=False,
convert=utilities.ChannelConverter(constraint=discord.TextChannel),
doc='Sets the text channel the player will use for the interface.'),
Opt('switchcontrol', optional=True, group='options',
doc='Switches between DJ only, partial, and public control types.'),
Opt('switchmode', optional=True, group='options',
doc='Switches between repeating playlist and single play queue mode.'),
Opt('mirrorchat', optional=True, group='options',
doc='Mirrors the last few chat messages to a message above the player.'),
Opt('autodisconnect', optional=True, group='options',
doc='Automatically disconnects the bot if all users leave the channel.'),
doc='Configures the music player properties.',
function=configure_player),
SubCommand(Opt('clear'), doc='Clears the playlist.', function=clear_playlist),
SubCommand(
Opt('page'),
Arg('number', convert=int, quotes_recommended=False),
doc='Displays the given page.', function=skip_to_page),
SubCommand(
Opt('swap'),
Arg('track 1', convert=int, quotes_recommended=False),
Arg('track 2', convert=int, quotes_recommended=False),
doc='Swaps the position of the given tracks.', function=swap_tracks),
SubCommand(
Opt('control'),
Opt('pause', optional=True, group='action'),
Opt('resume', optional=True, group='action'),
Opt('stop', optional=True, group='action'),
Opt('next', optional=True, group='action'),
Opt('skip', optional=True, group='action'),
Opt('previous', optional=True, group='action'),
doc='Basic controls for the player. Only one option can be provided at a time.',
confidence_threshold=10, function=control_player),
SubCommand(
Opt('play'),
Opt('track', attached='track number', optional=True,
quotes_recommended=False, convert=int,
doc='Plays the given track number.'),
Arg('query', argtype=ArgTypes.MERGED_OPTIONAL,
doc='Either a URL to a supported site (YouTube, Bandcamp, '
'SoundCloud, etc.), or a YouTube search query.'),
confidence_threshold=5, doc='Plays (or adds) the given track.',
function=setup_player, id='play'),
SubCommand(doc='Shows the music player interface.', function=setup_player, id='show'),
],
shortcuts=[
Shortcut('p', '{arguments}', Arg('arguments', argtype=ArgTypes.MERGED_OPTIONAL)),
Shortcut('add', 'add {query}', Arg('query', argtype=ArgTypes.MERGED)),
Shortcut('remove', 'remove {number}', Arg('number', argtype=ArgTypes.MERGED)),
Shortcut('volume', 'volume {percent}', Arg('percent', argtype=ArgTypes.MERGED)),
Shortcut(
'play', 'play {arguments}',
Arg('arguments', argtype=ArgTypes.MERGED_OPTIONAL)),
Shortcut('pause', 'control pause'),
Shortcut('resume', 'control resume'),
Shortcut('skip', 'control skip'),
Shortcut('next', 'control next'),
Shortcut('previous', 'control previous')],
allow_direct=False, category='music',
pre_check=check_whitelist, description='Play music.'))
return new_commands
@plugins.db_template_spawner
def get_templates(bot):
return {
'playlist_template': (
"url text,"
"downloadurl text,"
"title text,"
"duration integer,"
"userid bigint,"
"timestamp bigint,"
"extra json,"
"id serial UNIQUE"
)
}
class MusicPlayer():
def __init__(self, bot, message, autoplay=False, track_index=None):
# Discord information
self.bot = bot
self.channel = message.channel
self.author = message.author
self.voice_channel = message.author.voice.channel
self.guild = message.guild
self.voice_client = None
self.source = None
self.embed = None
self.message = None # Set later
self.satellite_message = None
self.satellite_data = None
self.mirror_message = None
self.mirror_last_notification = None
self.mirror_notifications = deque(maxlen=5)
self.mirror_chats = deque(maxlen=12)
# Update/internal tasks
self.timer_task = None # Player timer
self.command_task = None # Waits for reaction commands
self.progress_task = None # Refreshes the progress bar
self.state_check_task = None # Checks voice state changes
        self.chat_mirror_task = None # Mirrors chat every MIRROR_TIMER seconds
self.autoplay_task = None # Short-lived task for autostarting the player
# Player information
self.state = States.LOADING
self.loading_interface = False
self.first_time_startup = True
self.now_playing = None
self.notification = None
self.page = 0
self.progress = 0
self.start_time = 0
self.last_interface_update = 0
self.listeners = 0
self.skip_voters = []
self.skip_threshold = 0.5
self.shuffle_stack = []
self.autopaused = False
self.tracklist = None
self.tracklist_url = ''
self.tracklist_time = 0
self.tracklist_update_time = 0
self.update_tracklist()
self.update_config()
if self.mode == Modes.QUEUE:
self.track_index = 0 # Track index in queue mode doesn't change
else:
if self.shuffle and self.tracklist:
self.track_index = random.randint(0, len(self.tracklist) - 1)
else:
self.track_index = data.get(
self.bot, __name__, 'last_index', guild_id=self.guild.id, default=0)
if not 0 <= self.track_index < len(self.tracklist):
self.track_index = 0
# Build interface
asyncio.ensure_future(self._connect(autoplay=autoplay, track_index=track_index))
def update_config(self):
guild_id = self.guild.id
default_threshold = configurations.get(self.bot, __name__, key='max_threshold')
default_cutoff = configurations.get(self.bot, __name__, key='max_cutoff')
self.threshold = data.get(
self.bot, __name__, 'threshold', guild_id=guild_id, default=default_threshold)
self.cutoff = data.get(
self.bot, __name__, 'cutoff', guild_id=guild_id, default=default_cutoff)
self.control = data.get(
self.bot, __name__, 'control', guild_id=guild_id, default=Control.PARTIAL)
self.mode = data.get(
self.bot, __name__, 'mode', guild_id=guild_id, default=Modes.QUEUE)
        self.shuffle = data.get(
            self.bot, __name__, 'shuffle', guild_id=guild_id, default=False)
self.mirror_chat = data.get(
self.bot, __name__, 'mirror_chat', guild_id=guild_id, default=False)
self.auto_disconnect = data.get(
self.bot, __name__, 'auto_disconnect', guild_id=guild_id, default=False)
self.volume = data.get(self.bot, __name__, 'volume', guild_id=guild_id, default=1.0)
if self.source:
self.source.volume = self.volume
# Actively update threshold/cutoff timer
if self.timer_task and self.state == States.PLAYING:
self.timer_task.cancel()
self.timer_task = asyncio.ensure_future(
self._track_timer(*self._get_delay(config_update=True)))
async def _connect(self, autoplay=False, track_index=None):
is_mod = data.is_mod(self.bot, member=self.author)
try:
self.voice_client = await utilities.join_and_ready(
self.bot, self.voice_channel, is_mod=is_mod, reconnect=True)
except Exception as e:
self.state = States.STOPPED
error = CBException("Failed to start the player interface.", e=e)
await self.channel.send(embed=error.embed)
else:
await asyncio.sleep(1) # Safety sleep
await self._build_interface()
# Start playback if necessary
if autoplay:
self.autoplay_task = asyncio.ensure_future(
self._autoplay(track_index=track_index))
async def _autoplay(self, track_index=None):
safety_timeout = 0
while self.state == States.LOADING:
if safety_timeout > 30:
raise CBException("Autoplay failed.")
await asyncio.sleep(0.5)
safety_timeout += 0.5
asyncio.ensure_future(self.play(track_index=track_index, author=self.author))
def update_tracklist(self):
self.tracklist_update_time = time.time()
self.tracklist = _get_tracklist(self.bot, self.guild)
async def update_state(self):
if self.state == States.STOPPED:
return
if not (self.voice_client and self.voice_channel):
logger.warn("update_state detected that the bot disconnected. Stopping now.")
await self.stop(
text="The player has been stopped due to an undetected disconnection.")
elif (
(self.voice_client.is_playing() and self.voice_client.source != self.source) or
self.guild.me not in self.voice_channel.members):
logger.warn("update_state detected an unstopped instance. Stopping now.")
await self.stop(
text="The player has been stopped due to a different audio source being in use.")
async def reset_player_messages(self):
"""Rebuilds the set of 3 messages if one is somehow deleted."""
await self.set_new_message(self.message)
self.mirror_last_notification = ""
self.notification = "A message was unexpectedly deleted."
async def set_new_message(self, message, autoplay=False, track_index=None):
"""Bumps up the player interface to the bottom of the channel."""
# Prevent issues with trying to set a new message too quickly
if self.loading_interface:
logger.warn("Ignoring interface refresh reques as the interface is still loading")
if autoplay:
self.autoplay_task = asyncio.ensure_future(
self._autoplay(track_index=track_index))
return
self.loading_interface = True
if self.command_task:
self.command_task.cancel()
if self.progress_task:
self.progress_task.cancel()
if self.state_check_task:
self.state_check_task.cancel()
if self.chat_mirror_task:
self.chat_mirror_task.cancel()
if self.message:
for old_message in (self.message, self.satellite_message, self.mirror_message):
try:
await old_message.delete()
except Exception as e:
logger.warn("Couldn't delete original messages: %s", e)
self.channel = message.channel
self.author = message.author
self.satellite_data = None # Force update
asyncio.ensure_future(self._build_interface(resume=self.state == States.PLAYING))
if autoplay:
self.autoplay_task = asyncio.ensure_future(
self._autoplay(track_index=track_index))
async def _build_interface(self, resume=False):
"""Sets up player messages and the main interface structure."""
self.state = States.LOADING
self.loading_interface = True
self.satellite_message = await self.channel.send(embed=discord.Embed(title="\u200b"))
self.mirror_message = await self.channel.send(embed=discord.Embed(title="\u200b"))
embed = discord.Embed(colour=discord.Colour(0xffab00))
embed.add_field( # Title
name=':arrows_counterclockwise: **[]**',
value='**`[{}]` [ `0:00` / `0:00` ]**'.format('-' * 50), inline=False)
embed.add_field(name='---', value='---', inline=False) # Info
embed.add_field(name='---', value='---', inline=False) # Listeners
embed.add_field(name='---', value='---\n' * 6, inline=False) # Tracklist
embed.add_field(name='---', value='---') # Notification
self.embed = embed
self.message = await self.channel.send(embed=embed)
self.command_task = asyncio.ensure_future(self._command_listener(resume=resume))
async def _progress_loop(self):
"""Refreshes the progress bar."""
await asyncio.sleep(5)
while True:
await self.update_state()
if self.state == States.PLAYING:
self.update_listeners(update_interface=False)
if time.time() - self.last_interface_update >= 4:
asyncio.ensure_future(self.update_interface())
asyncio.ensure_future(self.update_satellite())
await asyncio.sleep(5)
elif self.state in (States.PAUSED, States.LOADING):
# TODO: Implement idle timeout
await asyncio.sleep(1)
else: # Stopped
logger.warn("Progress loop wasn't cancelled for some reason. Stopping loop...")
return
async def _chat_mirror_loop(self):
"""Mirrors chat messages after 10 seconds."""
async def _delete_and_update(message):
await asyncio.sleep(MIRROR_TIMER)
if self.state == States.STOPPED or not self.mirror_chat:
return
try:
await message.delete()
except Exception as e:
pass
else:
await self.update_mirror(new_chat=message)
while True:
message = await self.bot.wait_for('message')
if (not self.mirror_chat or
not message or
self.state == States.STOPPED or
message.channel != self.channel):
continue
# Don't log player messages by the bot or non-standard messages (like pins)
player_messages = (self.message.id, self.satellite_message.id, self.mirror_message.id)
if message.type is discord.MessageType.default and message.id not in player_messages:
asyncio.ensure_future(_delete_and_update(message))
async def _listener_loop(self):
"""Checks the state of members in the voice channel."""
class VoiceChange(Enum):
NORMAL, LEFT, JOINED = range(3)
def check(member, before, after):
if member.guild != self.guild:
return VoiceChange.NORMAL
elif not member == self.bot.user and (member.bot or not (before or after)):
return VoiceChange.NORMAL
elif after and after.channel == self.voice_channel:
if not before or before.channel != self.voice_channel:
return VoiceChange.JOINED
elif before and before.channel == self.voice_channel:
if not after or after.channel != self.voice_channel:
return VoiceChange.LEFT
return VoiceChange.NORMAL
# Preliminary check
self.listeners = len([it for it in self.voice_channel.members if not it.bot])
# Wait on voice state updates to determine users entering/leaving
while True:
result = await self.bot.wait_for('voice_state_update')
if not result:
continue
elif self.state == States.STOPPED:
return
member, before, after = result
# Check for self changes
if member == self.bot.user and member.guild == self.guild:
if not after: # Disconnected
# TODO: Consider adding failsafe stop
logger.warn("Voice disconnected, detected from _listener_loop.")
return
if before != after:
logger.debug("Bot was dragged to a new voice channel.")
if after.channel == self.guild.afk_channel: # TODO: Act on AFK channel
logger.warn("Moved to the AFK channel. Failsafe stopping.")
self.voice_channel = after.channel
self.voice_client = self.guild.voice_client
# Update listener count
self.listeners = len([it for it in self.voice_channel.members if not it.bot])
logger.debug("Voice state updated. Listeners: %s", self.listeners)
self.update_listeners(update_interface=False)
voice_change = check(*result)
if voice_change is VoiceChange.LEFT:
if member.id in self.skip_voters:
self.skip_voters.remove(member.id)
asyncio.ensure_future(self.update_interface(ignore_ratelimit=True))
elif voice_change is VoiceChange.JOINED:
asyncio.ensure_future(self.update_interface(ignore_ratelimit=True))
if self.listeners == 0:
if self.auto_disconnect:
asyncio.ensure_future(
self.stop(
text=(
"The player has been stopped due to all users leaving the channel."
)
)
)
else:
self.autopaused = True
self.notification = "The player has been automatically paused"
asyncio.ensure_future(self.pause())
def update_listeners(self, update_interface=True):
"""Updates the number of listeners and skips the song if enough people have voted."""
current_listeners = [it.id for it in self.voice_channel.members]
for member_id in self.skip_voters[:]:
if member_id not in current_listeners:
self.skip_voters.remove(member_id)
# Skip if enough votes
needed_votes = math.ceil(self.listeners * self.skip_threshold)
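        # e.g. 5 listeners with the default 0.5 threshold -> ceil(2.5) = 3 votes needed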
if needed_votes and len(self.skip_voters) >= needed_votes:
index_string = '[[Track{}]{}]'.format(
' {}'.format(self.track_index + 1) if self.mode == Modes.PLAYLIST else '',
_build_shortlink(self.bot, self.now_playing))
self.notification = "{} was voteskipped ({} vote{})".format(
index_string, len(self.skip_voters), '' if len(self.skip_voters) == 1 else 's')
del self.skip_voters[:]
self._skip_track()
elif update_interface:
asyncio.ensure_future(self.update_interface(ignore_ratelimit=True))
async def update_interface(self, notification_text='', ignore_ratelimit=False):
"""Calls the other functions to update the main interface."""
await self.update_notification(text=notification_text)
await self.update_title()
await self.update_info()
await self.update_footer()
if not ignore_ratelimit and time.time() - self.last_interface_update < 1:
return
try:
await self.message.edit(content=None, embed=self.embed)
self.last_interface_update = time.time()
except discord.NotFound:
await self.reset_player_messages()
async def update_satellite(self):
"""Updates the satellite with track data."""
if not self.now_playing and self.satellite_data: # Player stopped
self.satellite_data = None
await self.satellite_message.edit(embed=discord.Embed())
return
elif not self.now_playing or self.now_playing.extra == self.satellite_data:
return
self.satellite_data = extra = self.now_playing.extra
embed = discord.Embed()
keys = ('uploader', 'views', 'likes', 'dislikes', 'uploaded')
if any(key in extra for key in keys):
info_list = ['{}: {}'.format(key.title(), extra[key]) for key in keys if key in extra]
embed.add_field(name='Info', value='\n'.join(info_list))
if 'description' in extra:
description = extra['description']
chunks = [description[it:it + 1000] for it in range(0, len(description), 1000)]
if len(chunks) > 3:
chunks = chunks[:3]
chunks[-1] += '…'
for index, chunk in enumerate(chunks):
embed.add_field(name='Description' if index == 0 else '\u200b', value=chunk)
if 'thumbnail' in extra:
embed.set_image(url=extra['thumbnail'])
if 'artist_thumbnail' in extra:
embed.set_thumbnail(url=extra['artist_thumbnail'])
try:
await self.satellite_message.edit(embed=embed)
except discord.NotFound:
await self.reset_player_messages()
async def update_mirror(self, new_notification=None, new_chat=None):
"""Updates the mirror message with notification or chat data."""
if new_notification:
if new_notification != self.mirror_last_notification:
self.mirror_last_notification = new_notification
self.mirror_notifications.append(new_notification)
if new_chat:
self.mirror_chats.append(new_chat)
embed = discord.Embed()
while sum(len(it) for it in self.mirror_notifications) > 1000:
self.mirror_notifications.popleft()
notifications = '\u200b' + '\n'.join(self.mirror_notifications)
embed.add_field(name='Recent notifications:', value=notifications, inline=False)
if self.mirror_chat:
for _ in range(3):
embed.add_field(name='\u200b', value='\u200b', inline=False)
formatted_chats = []
def _length_check(segment_index):
"""Checks the length of a set of 4 messages given the segment."""
segment = formatted_chats[4 * segment_index:4 * segment_index + 4]
return sum(len(it) for it in segment) < 1000
# Format messages
for message in self.mirror_chats:
if message.attachments:
attachment = ' [(Attachment)]({})'.format(message.attachments[0].url)
else:
attachment = ''
if message.content:
content = message.content
elif message.embeds:
title, description = message.embeds[0].title, message.embeds[0].description
title_text = '{}: '.format(title) if title else ''
description_text = description if description else '[No description]'
content = '{}{}'.format(title_text, description_text)
else:
content = '[Empty message]'
if len(content) > 500:
content = content[:500] + '…'
                content = content.replace('```', '\\`\\`\\`')
formatted_chats.append('[{}{}]: {}'.format(
message.author.mention, attachment, content))
# Remove messages if one is too long
for it in range(2, -1, -1):
while not _length_check(it):
del formatted_chats[0]
# Set embeds
segments = [formatted_chats[it:it + 4] for it in range(0, 12, 4)]
for index, segment in enumerate(segments):
embed.set_field_at(
index + 1, name='Recent chat messages:' if index == 0 else '\u200b',
value='\u200b' + '\n'.join(segment), inline=False)
try:
await self.mirror_message.edit(embed=embed)
except discord.NotFound:
await self.reset_player_messages()
async def update_footer(self):
"""Updates volume display, control type, and player mode in the footer."""
if self.volume < 0.3:
volume_indicator = '\U0001F508'
elif self.volume < 0.6:
volume_indicator = '\U0001F509'
else:
volume_indicator = '\U0001F50A'
footer_text = '{}: {}% | {} | {}{}{} | Click \u2753 for help'.format(
volume_indicator,
int(self.volume * 100),
('Public', 'Partially public', 'DJs only')[self.control],
'\U0001F500 ' if self.mode == Modes.PLAYLIST and self.shuffle else '',
('Playlist', 'Queue')[self.mode],
' | Mirroring chat' if self.mirror_chat else '')
self.embed.set_footer(text=footer_text)
async def update_title(self):
"""Updates the now playing title and progress bar"""
# Calculate progress and set embed color
if self.state == States.PLAYING:
progress = self.progress + (time.time() - self.start_time)
status_icon = ':arrow_forward:'
color = discord.Color(0x3b88c3)
elif self.state == States.PAUSED:
progress = self.progress
status_icon = ':pause_button:'
color = discord.Color(0xccd6dd)
else:
progress = 0
status_icon = ':arrows_counterclockwise:'
color = discord.Color(0xffab00)
self.embed.color = color
# Set title and progress
if self.now_playing:
title = _truncate_title(self.now_playing.title, limit=60)
duration = self.now_playing.duration
else:
title = '---'
duration = 0
new_name = '{} **[{}]**'.format(status_icon, title)
percentage = 0 if duration == 0 else progress / duration
progress_bar = '\u2588' * int(50 * percentage)
new_value = '**`[{:-<50}]` [ `{}` / `{}` ]**'.format(
progress_bar, utilities.get_time_string(progress),
utilities.get_time_string(duration))
self.embed.set_field_at(0, name=new_name, value=new_value, inline=False)
async def update_info(self):
"""Updates the info, listeners, and track list explorer display."""
# Listeners
new_name = '{} listener{}'.format(self.listeners, '' if self.listeners == 1 else 's')
new_value = '[ {} / {} ] :eject: votes needed to skip'.format(
len(self.skip_voters), math.ceil(self.listeners * self.skip_threshold))
self.embed.set_field_at(2, name=new_name, value=new_value, inline=False)
# Tracklist slice
total_tracks = len(self.tracklist)
total_duration = sum(it.duration for it in self.tracklist)
total_pages = max(int((total_tracks + 4) / 5), 1)
self.page %= total_pages
displayed_tracks = self.tracklist[self.page * 5:(self.page * 5) + 5]
# Build individual track entries from slice
info = ['---'] * 5 + ['Page [ {} / {} ]'.format(self.page + 1, total_pages)]
for index, entry in enumerate(displayed_tracks):
duration = utilities.get_time_string(entry.duration)
entry_index = (self.page * 5) + index + 1
full_title = entry.title.replace('`', '').replace('*', '')
title = _truncate_title(full_title)
use_indicator = entry_index == self.track_index + 1 and self.mode == Modes.PLAYLIST
info[index] = ('**[`{}{}`]{}**: ({}) *{}*'.format(
'▶ ' if use_indicator else '', entry_index,
_build_shortlink(self.bot, entry), duration, title))
new_value = '\n'.join(info)
# Total tracks and runtime
player_mode = 'queued' if self.mode == Modes.QUEUE else 'in the playlist'
if total_tracks > 0:
new_name = '{} track{} {} (runtime of {}):'.format(
total_tracks, '' if total_tracks == 1 else 's', player_mode,
utilities.get_time_string(total_duration, text=True))
else:
new_name = 'No tracks {}'.format(player_mode)
self.embed.set_field_at(3, name=new_name, value=new_value, inline=False)
# Info
if self.now_playing:
new_name = 'Info:'
time_ago = time.time() - self.now_playing.timestamp
index_string = '[[Track{}]{}]'.format(
' {}'.format(self.track_index + 1) if self.mode == Modes.PLAYLIST else '',
_build_shortlink(self.bot, self.now_playing))
new_value = 'Playing: {} Added by <@{}> {} ago'.format(
index_string, self.now_playing.userid,
utilities.get_time_string(time_ago, text=True))
else:
new_name = '---'
new_value = '---'
# Determine next track
if len(self.tracklist) == 0:
next_index = -1
new_value += '\n---'
elif self.now_playing is None:
next_index = 0 if self.mode == Modes.QUEUE else self.track_index
elif self.track_index + 1 >= len(self.tracklist):
next_index = 0
else:
if self.mode == Modes.PLAYLIST:
next_index = self.track_index + 1
else:
next_index = 0
# Show next track if available
if next_index != -1:
next_track = self.tracklist[next_index]
if next_index >= 0:
if self.mode == Modes.PLAYLIST and self.shuffle:
new_value += '\nUp next: [Track ?]'
else:
new_value += '\nUp next: {}'.format(
_build_track_details(self.bot, next_track, next_index))
self.embed.set_field_at(1, name=new_name, value=new_value, inline=False)
async def update_notification(self, text=''):
if text:
self.notification = text
elif not self.notification:
self.notification = 'No notification.'
if self.notification != self.mirror_last_notification:
asyncio.ensure_future(self.update_mirror(new_notification=self.notification))
self.embed.set_field_at(4, name='Notification:', value=self.notification)
def _skip_track(self):
"""Skips the current track (even if paused)."""
delta = 1 if self.mode == Modes.PLAYLIST else 0
if self.mode == Modes.PLAYLIST and self.shuffle:
if self.now_playing:
self.shuffle_stack.append(self.now_playing.id)
if len(self.tracklist) > 1:
new_track_index = random.randint(0, len(self.tracklist) - 2)
if new_track_index >= self.track_index:
new_track_index += 1
else:
new_track_index = 0
else:
new_track_index = self.track_index + delta
asyncio.ensure_future(self.play(track_index=new_track_index))
async def _track_timer(self, sleeptime, use_skip=False):
"""Sleeps until the end of the song or cutoff. Plays the next track afterwards."""
logger.debug("Sleeping for %s seconds. Time: %s", sleeptime, time.time())
track_check = self.now_playing
await asyncio.sleep(sleeptime)
logger.debug("Finished sleeping for %s seconds. Time: %s", sleeptime, time.time())
await self.update_state()
if self.state == States.STOPPED or track_check != self.now_playing:
logger.debug("The track timer resumed?")
return
while self.state == States.LOADING:
logger.warn("Player was moved while the track was loading.")
await asyncio.sleep(1)
if self.mode == Modes.PLAYLIST and self.shuffle:
logger.debug("Adding track %s to the shuffle stack", track_check.title)
self.shuffle_stack.append(track_check.id)
if len(self.tracklist) > 1:
new_track_index = random.randint(0, len(self.tracklist) - 2)
if new_track_index >= self.track_index:
new_track_index += 1
else:
new_track_index = 0
asyncio.ensure_future(self.play(track_index=new_track_index, skipped=use_skip))
else:
logger.debug('_track_timer is moving on: %s', use_skip)
asyncio.ensure_future(self.play(skipped=use_skip))
def _get_delay(self, config_update=False): # Gets track delay with cutoff
if self.now_playing.duration > self.threshold:
duration = self.cutoff
use_skip = self.now_playing
else:
duration = self.now_playing.duration
use_skip = False
if config_update:
current_progress = self.progress + time.time() - self.start_time
else:
current_progress = self.progress
return (max(duration - current_progress, 0), use_skip)
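    # Editor's note: with a 600 second threshold and 300 second cutoff
    # (hypothetical configured values), a 900 second track returns
    # (300 - progress, <track>), so playback stops at the cutoff and the track
    # is later reported as skipped for exceeding the length threshold.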
async def play(self, track_index=None, skipped=False, wrap_track_numbers=True, author=None):
"""Plays (the given track).
Keyword arguments:
track_index -- The specific track to play.
In queue mode, -1 indicates to repeat the current track.
skipped -- Whether or not the last track was skipped due to a length constraint.
wrap_track_numbers -- Wraps out-of-bounds track indices to the nearest edge.
author -- If provided, displays a notification on who started the player.
"""
# Ignore loading player
if self.state in (States.LOADING, States.STOPPED):
return
# Resume player if paused
if (self.state == States.PAUSED and
self.now_playing and self.progress and track_index is None):
self.state = States.PLAYING
self.voice_client.resume()
self.start_time = time.time()
self.timer_task = asyncio.ensure_future(self._track_timer(*self._get_delay()))
author_text = '{} resumed the player'.format(author.mention) if author else ''
asyncio.ensure_future(self.update_interface(notification_text=author_text))
self.autopaused = False # Reset single-time resume state
return
# No more tracks left to play
if len(self.tracklist) == 0 and not (track_index == -1 and self.state == States.PLAYING):
self.notification = "There are no more tracks in the queue"
if self.voice_client.is_playing():
self.voice_client.stop()
self.source = None
self.now_playing = None
self.first_time_startup = True # Reset so non-DJs can start the player again
self.progress = 0
self.state = States.PAUSED
asyncio.ensure_future(self.update_interface(ignore_ratelimit=True))
asyncio.ensure_future(self.update_satellite())
return
# No track index was given - act as a skip
if track_index is None and self.now_playing:
if self.mode == Modes.PLAYLIST:
self.track_index = (self.track_index + 1) % len(self.tracklist)
# A specific track index was given
elif track_index is not None:
if track_index != -1 and not 0 <= track_index < len(self.tracklist):
if wrap_track_numbers:
if track_index >= len(self.tracklist):
track_index = 0
elif track_index < 0:
track_index = -1
else:
self.notification = (
'Index must be between 1 and {} inclusive'.format(len(self.tracklist)))
asyncio.ensure_future(self.update_interface())
return
# Wrap a backwards skip to the end of the playlist in playlist mode
if self.mode == Modes.PLAYLIST:
if track_index == -1:
track_index = len(self.tracklist) - 1
self.track_index = track_index
# Track from playlist
if self.mode == Modes.PLAYLIST:
track = self.tracklist[self.track_index]
# Track from queue
else:
# Repeat current track
if track_index == -1:
if self.now_playing:
track = self.now_playing
else:
return
# Skip to specific track by removing it from the database first
else:
if track_index is None:
track_index = 0
track = self.tracklist[0 if track_index == -1 else track_index]
data.db_delete(
self.bot, 'playlist', table_suffix=self.guild.id,
where_arg='id=%s', input_args=[track.id])
self.update_tracklist()
self.autopaused = False # Reset single-time resume state
# Setup the player
logger.debug("Preparing to play the next track.")
self.page = int(self.track_index / 5)
del self.skip_voters[:]
if self.state == States.PLAYING:
if self.voice_client.is_playing():
self.voice_client.stop()
if self.timer_task:
self.timer_task.cancel()
self.first_time_startup = not bool(self.now_playing)
self.state = States.LOADING
self.now_playing = track
sound_file = data.get_from_cache(self.bot, None, url=track.url)
# Audio not found in cache, download now instead
if not sound_file:
asyncio.ensure_future(self.update_interface())
logger.debug("Not found in cache. Downloading...")
try:
options = {'format': 'bestaudio/best', 'noplaylist': True}
downloader = YoutubeDL(options)
sound_file = await data.add_to_cache_ydl(self.bot, downloader, track.url)
except Exception as e: # Attempt to redownload from base url
logger.warn("Failed to download track %s\n%s", track.url, e)
self.notification = "Failed to download {}. Failsafe skipping...".format(
track.title)
self.state = States.PAUSED
self._skip_track()
return
# TODO: Add exception handling
# TODO: Change ffmpeg_options for docker version
#ffmpeg_options = '-protocol_whitelist "file,http,https,tcp,tls"'
#audio_source = discord.FFmpegPCMAudio(sound_file, before_options=ffmpeg_options)
audio_source = discord.FFmpegPCMAudio(sound_file)
# Set volume and play audio
audio_source = discord.PCMVolumeTransformer(audio_source, volume=self.volume)
self.voice_client.play(audio_source)
self.source = audio_source
# Record progress time
self.progress = 0
self.start_time = time.time()
self.state = States.PLAYING
self.timer_task = asyncio.ensure_future(self._track_timer(*self._get_delay()))
if skipped:
self.notification = (
'The track *{}* was cut short because it exceeded '
'the song length threshold of {} seconds.'.format(
_build_hyperlink(self.bot, skipped), self.threshold))
elif self.first_time_startup and author:
self.notification = '{} started the player'.format(author.mention)
asyncio.ensure_future(self.update_interface(ignore_ratelimit=True))
data.add(self.bot, __name__, 'last_index', self.track_index, guild_id=self.guild.id)
async def pause(self, author=None):
if (self.state in (States.PAUSED, States.LOADING, States.STOPPED) or
self.voice_client is None or not self.voice_client.is_playing()):
return
if self.timer_task:
self.timer_task.cancel()
self.voice_client.pause()
self.state = States.PAUSED
self.progress += time.time() - self.start_time
author_text = '{} paused the player'.format(author.mention) if author else ''
asyncio.ensure_future(self.update_interface(
notification_text=author_text, ignore_ratelimit=True))
async def stop(self, text="The player has been stopped."):
logger.debug("Stopping the player!")
await utilities.stop_audio(self.bot, self.guild)
self.state = States.STOPPED
self.now_playing = None
try:
if self.voice_client:
self.voice_client.stop()
if self.timer_task:
self.timer_task.cancel()
if self.command_task:
self.command_task.cancel()
if self.progress_task:
self.progress_task.cancel()
if self.state_check_task:
self.state_check_task.cancel()
if self.chat_mirror_task:
self.chat_mirror_task.cancel()
except Exception as e:
logger.warn("Failed to stop some task. %s", e)
try:
asyncio.ensure_future(self.satellite_message.delete())
asyncio.ensure_future(self.mirror_message.delete())
asyncio.ensure_future(self.message.clear_reactions())
asyncio.ensure_future(self.message.edit(content=text, embed=None))
except Exception as e:
logger.warn("Failed to modify the original message %s", e)
async def track_navigate(self, use_skip, member):
"""Navigates the track (next, previous, or repeat). Returns True if successful."""
is_dj = data.has_custom_role(self.bot, __name__, 'dj', member=member)
# Build skip text
use_repeat = time.time() - self.start_time >= 10 and self.now_playing
self_skip = False
if use_skip:
skip_format = '{} skipped {}'
if self.now_playing and self.now_playing.userid == member.id:
self_skip = True
elif not self.now_playing:
skip_format = '{} played the queued track'
else:
if self.now_playing and (use_repeat or self.mode == Modes.QUEUE):
skip_format = '{} repeated {}'
elif self.now_playing:
skip_format = '{} skipped back from {}'
else:
skip_format = '{} skipped back a track'
# Skip track only if the user is a DJ or was the one that added it
if not self_skip and not is_dj and not self.control == Control.ALL:
return False
if self.now_playing:
track_details = _build_track_details(
self.bot, self.now_playing, self.track_index)
else:
track_details = ''
self.notification = skip_format.format(member.mention, track_details)
# Determine track delta
if self.mode == Modes.PLAYLIST:
# Going back repeats the current track if at least 10 seconds have elapsed, otherwise it moves to the previous track
start_delta = 1 if self.now_playing else 0
delta = start_delta if use_skip else (0 if use_repeat else -1)
else:
delta = 0 if use_skip else -1
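# Shuffle navigation: going back pops the previously played track off the shuffle stack;
# skipping forward remembers the current track and picks a random index other than the current one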
if self.mode == Modes.PLAYLIST and self.shuffle and delta != 0:
last_track = None
if not use_skip and self.shuffle_stack: # Check shuffle stack first
last_track_id = self.shuffle_stack.pop()
for new_track_index, track in enumerate(self.tracklist):
if track.id == last_track_id:
last_track = track
break
if last_track is None:
if self.now_playing:
self.shuffle_stack.append(self.now_playing.id)
if len(self.tracklist) > 1:
new_track_index = random.randint(0, len(self.tracklist) - 2)
if new_track_index >= self.track_index:
new_track_index += 1
else:
new_track_index = 0
else:
new_track_index = self.track_index + delta
asyncio.ensure_future(self.play(track_index=new_track_index))
return True
async def _command_listener(self, resume=False):
valid_commands = ('⏮', '⏯', '⏭', '⏹', '🔀', '🎵', '⬅', '⏺', '➡', '⏏', '❓')
async def _add_buttons():
"""Adds the buttons in the background to show interface immediately."""
for reaction in valid_commands:
try:
await self.message.add_reaction(reaction)
except Exception as e:
logger.warn("Failed to add reaction: %s", e)
# Check reactions are proper
for reaction in self.message.reactions:
users = await self.bot.get_reaction_users(reaction)
for user in users:
if user != self.bot.user:
await self.message.remove_reaction(reaction.emoji, user)
# Safety interface update
asyncio.ensure_future(self.update_interface())
await asyncio.sleep(1)
self.loading_interface = False
self.progress_task = asyncio.ensure_future(self._progress_loop())
self.state_check_task = asyncio.ensure_future(self._listener_loop())
self.chat_mirror_task = asyncio.ensure_future(self._chat_mirror_loop())
self.page = int(self.track_index / 5)
asyncio.ensure_future(self.update_interface())
asyncio.ensure_future(_add_buttons())
# Startup - finished loading basics
if self.state == States.LOADING:
self.state = States.PLAYING if resume else States.PAUSED
try: # TODO: Remove try/except block
while True:
# Wait on reaction command
kwargs = {'check': lambda r, u: r.message.id == self.message.id and not u.bot}
logger.debug("Waiting on command...")
result = await self.bot.wait_for('reaction_add', **kwargs)
if result is None or self.state == States.STOPPED:
return
elif result[1] == self.bot.user:
continue
# Check validity of reaction
command, member = result[0].emoji, result[1]
logger.debug("Player interaction: %s: %s", member, command)
is_dj = data.has_custom_role(self.bot, __name__, 'dj', member=member)
if not await utilities.can_interact(self.bot, member, channel_id=self.channel.id):
continue
asyncio.ensure_future(self.message.remove_reaction(command, member))
if not is_dj and (member not in self.voice_channel.members or
self.state == States.LOADING or
command not in valid_commands):
continue
# Check player control type
restricted_commands = [
set(), # Public
(valid_commands[0],) + valid_commands[3:5], # Partially public
valid_commands[:10] # DJ Only
][self.control]
if command in restricted_commands and not is_dj:
logger.debug("Ignoring command (insufficient permissions)")
continue
# Play/pause and skip
if command in valid_commands[:3]:
logger.debug("Play|pause and skip selected")
# Play/pause
if command == valid_commands[1]:
permissions = self.control == Control.ALL or is_dj
if self.state == States.PLAYING and permissions:
asyncio.ensure_future(self.pause(author=member))
elif self.state == States.PAUSED:
if permissions or self.autopaused or self.first_time_startup:
asyncio.ensure_future(self.play(author=member))
# Skip
elif self.state != States.LOADING:
use_skip = command == valid_commands[2]
asyncio.ensure_future(self.track_navigate(use_skip, member))
# Stop player
elif command == valid_commands[3]:
await self.stop(
text="The player has been stopped by {}.".format(member.mention))
return
# Shuffle mode
elif command == valid_commands[4]:
if self.mode == Modes.PLAYLIST:
self.shuffle = not self.shuffle
data.add(
self.bot, __name__, 'shuffle', self.shuffle, guild_id=self.guild.id)
asyncio.ensure_future(self.update_interface())
# Generate tracklist
elif command == valid_commands[5]:
logger.debug("Tracklist selected")
if self.tracklist:
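# Only rebuild and re-upload the tracklist file if the tracklist changed since the last upload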
if self.tracklist_time != self.tracklist_update_time:
self.tracklist_time = self.tracklist_update_time
tracklist_string = await _build_tracklist(
self.bot, self.guild, self.tracklist)
tracklist_file = utilities.get_text_as_file(tracklist_string)
url = await utilities.upload_to_discord(
self.bot, tracklist_file, filename='tracklist.txt')
self.tracklist_url = url
text = '[Click here]({}) to download the tracklist'.format(
self.tracklist_url)
asyncio.ensure_future(self.update_interface(notification_text=text))
# Track list navigation
elif command in valid_commands[6:9]:
logger.debug("Track list navigation selected")
if command == valid_commands[7]: # Reset to the current page
self.page = int(self.track_index / 5)
else:
self.page += -1 if command == valid_commands[6] else 1
asyncio.ensure_future(self.update_interface(ignore_ratelimit=True))
# Voteskip
elif command == valid_commands[9]:
logger.debug("Vote skip selected")
if self.state != States.PLAYING or member.bot:
continue
elif member.id in self.skip_voters:
self.skip_voters.remove(member.id)
logger.debug("Vote by %s was removed.", member)
elif member in self.voice_channel.members:
self.skip_voters.append(member.id)
logger.debug("Vote by %s was added.", member)
else:
continue
self.update_listeners()
# Help
elif command == valid_commands[10]:
logger.debug("Help selected")
button_help = (
'⏮, ⏯, ⏭, ⏹: Back, Play|Pause, Next, Stop\n'
'🔀: Shuffle (playlist mode only)\n'
'🎵: Generate tracklist\n'
'⬅, ➡: Track page navigation\n'
'⏺: Reset track page to current playing track\n'
'⏏: Voteskip (must be listening)\n'
'❓: This help page'
)
permissions_help = (
'**DJs only:** Only DJs can manage the player.\n'
'**Partially public:** Everybody can '
'add tracks, change track pages, and voteskip. '
'You can skip your own tracks as well.\n'
'**Public:** Everybody has full control '
'(except removing other people\'s '
'tracks and importing tracklists).'
)
status_help = (
':arrow_forward: (Blue): Playing a track\n'
':pause_button: (White): Paused\n'
':arrows_counterclockwise: (Orange): Loading'
)
command_help = (
'To add tracks:\n`{0}`\u200b{1[3].help_string}\n'
'To remove tracks:\n`{0}`\u200b{1[4].help_string}\n'
'To add tracks and/or skip to a track:\n'
'`{0}`\u200b{1[11].help_string}\n\n'
'Examples (using the shortcut):\n'
'`{0}add Erasure Always`\n'
'`{0}remove 1`\n'
'`{0}play Toto Africa`\n'
'`{0}play track 7`\n'
'For more, type: `help playlist`'
).format(
utilities.get_invoker(self.bot, guild=self.guild),
self.bot.commands['playlist'].subcommands)
help_embed = discord.Embed(title=':question: Music player help')
help_embed.add_field(name='Basic usage:', value=command_help)
help_embed.add_field(name='Buttons:', value=button_help)
help_embed.add_field(name='Control types:', value=permissions_help)
help_embed.add_field(name='Status icons:', value=status_help)
asyncio.ensure_future(member.send(embed=help_embed))
except Exception as e:
if not isinstance(e, asyncio.CancelledError):
self.bot.extra = e
logger.warn("Something bad happened (%s). %s", type(e), e)
# Link builders
def _build_hyperlink(bot, track):
full_title = track.title.replace('`', '').replace('*', '')
title = _truncate_title(full_title)
return '[{0}]({1} "{2} (added by <@{3}>)")'.format(title, track.url, full_title, track.userid)
def _build_shortlink(bot, track):
"""Like _build_hyperlink, but for the URL portion only."""
display_url = 'http://dis.gd' if len(track.url) > URL_LIMIT else track.url
display_title = _truncate_title(track.title.replace('`', ''))
return '({} "{} (added by <@{}>)")'.format(display_url, display_title, track.userid)
def _build_track_details(bot, track, index):
"""Creates a string that shows a one liner of the track"""
full_title = track.title.replace('`', '').replace('*', '')
title = _truncate_title(full_title)
return '[[Track {}]({} "{} (added by <@{}>)")] ({}) *{}*'.format(
index + 1, track.url, full_title, track.userid,
utilities.get_time_string(track.duration), title)
def _truncate_title(text, limit=TITLE_LIMIT):
"""Truncates the text to the given limit if it is too long."""
return (text[:limit] + '…') if len(text) > limit else text
def _get_tracklist(bot, guild):
cursor = data.db_select(
bot, from_arg='playlist', additional='ORDER BY id ASC', table_suffix=guild.id)
return cursor.fetchall() if cursor else ()
def _get_music_player(bot, guild):
return data.get(bot, __name__, 'music_player', guild_id=guild.id, volatile=True)
async def _check_active_player(bot, guild, autodelete_time=5):
"""Tries to get the active music player and whether or not the interface is active."""
import_lock = data.get(bot, __name__, 'import_lock', guild_id=guild.id, volatile=True)
if import_lock:
raise CBException("A track import is in progress. Please wait for it to finish.")
music_player = _get_music_player(bot, guild)
if music_player:
await music_player.update_state()
use_player_interface = music_player.state is not States.STOPPED
else:
use_player_interface = False
autodelete = autodelete_time if use_player_interface else 0
return music_player, use_player_interface, autodelete
def _check_total_tracks_limits(bot, author):
"""Ensures that limits of the track list are respected. Returns tracklist."""
# Limits
user_track_limit = data.get(
bot, __name__, key='user_track_limit', guild_id=author.guild.id,
default=configurations.get(bot, __name__, key='max_user_track_limit'))
total_track_limit = data.get(
bot, __name__, key='total_track_limit', guild_id=author.guild.id,
default=configurations.get(bot, __name__, key='max_total_track_limit'))
# Checks
tracklist = _get_tracklist(bot, author.guild)
if data.has_custom_role(bot, __name__, 'dj', member=author): # DJs ignore limits
return tracklist
if total_track_limit and len(tracklist) >= total_track_limit:
raise CBException("The track limit of {} has been reached.".format(total_track_limit))
user_tracks = [it for it in tracklist if it.userid == author.id]
if user_track_limit and len(user_tracks) >= user_track_limit:
raise CBException(
"You cannot add any more songs right now (limit {}).".format(user_track_limit))
return tracklist
async def _add_track_with_url(bot, guild, check_url, user_id=0, timestamp=0):
"""Checks the given url and adds it to the database."""
options = {'format': 'bestaudio/best', 'noplaylist': True, 'default-search': 'ytsearch'}
downloader = YoutubeDL(options)
# Check for a direct URL (SO: 7160737)
try:
test = urlparse(check_url)
is_url = test.scheme and test.netloc and test.path
except:
is_url = False
if not is_url and not check_url.lower().startswith('ytsearch:'):
check_url = 'ytsearch:' + check_url.strip()
# Get information about the track
try:
info = await utilities.future(downloader.extract_info, check_url, download=False)
if not is_url: # Select first result on search
info = info['entries'][0]
check_url = info['webpage_url']
except BotException as e:
raise e # Pass up
except Exception as e:
raise CBException("Failed to fetch information from the URL.", e=e)
return await _add_track_to_db(
bot, guild, check_url, info, user_id=user_id, timestamp=timestamp)
async def _add_track_to_db(bot, guild, check_url, info, user_id=0, timestamp=0):
"""Adds the given track info to the database."""
hard_threshold = configurations.get(bot, __name__, key='hard_threshold')
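# Stash the raw extractor info on the bot object (appears to be a debugging aid)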
bot.extra = info
try:
chosen_format = info['formats'][0]
download_url = chosen_format['url']
title = info.get('title', 'Unknown')
thumbnail = info.get('thumbnail', None)
likes = info.get('like_count', None)
dislikes = info.get('dislike_count', None)
views = info.get('view_count', None)
description = info.get('description', None)
upload_date = info.get('upload_date', None)
uploader = info.get('uploader', None)
if 'duration' in info:
duration = int(info['duration'])
else: # Manual download and check
extension = chosen_format['ext']
sound_file, filename = await utilities.download_url(
bot, download_url, extension=extension, include_name=True)
duration = int(TinyTag.get(sound_file).duration)
utilities.delete_temporary_file(bot, filename)
except BotException as e:
raise e # Pass up
except Exception as e:
raise CBException("Failed to get duration from the URL.", e=e)
if duration > hard_threshold:
raise CBException(
"Song is longer than the hard threshold of {} seconds.".format(hard_threshold))
# Prepare data for insertion
extra_data = {}
if thumbnail is not None:
extra_data['thumbnail'] = thumbnail
if likes is not None:
extra_data['likes'] = likes
if dislikes is not None:
extra_data['dislikes'] = dislikes
if views is not None:
extra_data['views'] = views
if description is not None:
extra_data['description'] = description
if upload_date is not None:
extra_data['uploaded'] = '{}/{}/{}'.format(
upload_date[4:6], upload_date[6:8], upload_date[:4])
if uploader is not None:
extra_data['uploader'] = uploader
entry_data = [
check_url,
download_url,
title,
duration,
user_id,
timestamp if timestamp else time.time(),
Json(extra_data)
]
return data.db_insert(
bot, 'playlist', table_suffix=guild.id, input_args=entry_data,
create='playlist_template')
async def add_track(bot, context):
"""Adds a track to the playlist (via command)."""
music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)
# Check channel restriction
channel_id = data.get(bot, __name__, 'channel', guild_id=context.guild.id)
if not channel_id:
raise CBException("No channel configured for the music player.")
channel_restriction = data.get_channel(bot, channel_id)
is_dj = data.has_custom_role(bot, __name__, 'dj', member=context.author)
if context.channel.id != channel_id and not is_dj:
raise CBException("You can only add tracks in {}".format(channel_restriction.mention))
# Check control restriction
control = data.get(
bot, __name__, 'control', guild_id=context.guild.id, default=Control.PARTIAL)
if not is_dj and control == Control.DJS:
raise CBException("You must be a DJ to add tracks.", autodelete=autodelete)
default_threshold = configurations.get(bot, __name__, key='max_threshold')
default_cutoff = configurations.get(bot, __name__, key='max_cutoff')
guild_id = context.guild.id
threshold = data.get(bot, __name__, 'threshold', guild_id=guild_id, default=default_threshold)
cutoff = data.get(bot, __name__, 'cutoff', guild_id=guild_id, default=default_cutoff)
# Add track to the playlist
check_url = context.arguments[0]
try:
tracklist = _check_total_tracks_limits(bot, context.author)
cursor = await _add_track_with_url(
bot, context.guild, check_url, user_id=context.author.id)
track = cursor.fetchone()
except BotException as e:
e.autodelete = autodelete
raise e
response = '{} added {}'.format(
context.author.mention, _build_track_details(bot, track, len(tracklist)))
if track.duration > threshold:
response += (
"\nTrack is longer than the threshold length ({} seconds), so "
"only the first {} seconds will be played".format(threshold, cutoff))
# Check the music player again, as it may have stopped while we were downloading the URL
music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)
if use_player_interface:
music_player.update_tracklist()
await music_player.update_interface(notification_text=response)
return Response(
embed=discord.Embed(description=response),
message_type=MessageTypes.REPLACE if use_player_interface else MessageTypes.NORMAL,
delete_after=autodelete if use_player_interface else None,
extra=autodelete if use_player_interface else None)
async def remove_track(bot, context):
music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)
# Check track index
tracklist = _get_tracklist(bot, context.guild)
if not tracklist:
raise CBException("The playlist queue is empty.", autodelete=autodelete)
index = context.arguments[0] - 1
if not 0 <= index < len(tracklist):
raise CBException("Invalid index. Must be between 1 and {} inclusive.".format(
len(tracklist)), autodelete=autodelete)
# Check permissions
is_dj = data.has_custom_role(bot, __name__, 'dj', member=context.author)
control = data.get(
bot, __name__, 'control', guild_id=context.guild.id, default=Control.PARTIAL)
track = tracklist[index]
if control == Control.DJS and not is_dj:
raise CBException("You must be a DJ to remove entries.", autodelete=autodelete)
elif track.userid != context.author.id and not is_dj:
raise CBException(
"You must be the user who added the entry, or a DJ.", autodelete=autodelete)
data.db_delete(
bot, 'playlist', table_suffix=context.guild.id,
where_arg='id=%s', input_args=[track.id])
response = '{} removed {}'.format(
context.author.mention, _build_track_details(bot, track, index))
# Change current index if necessary
if use_player_interface:
music_player.update_tracklist()
if music_player.mode == Modes.PLAYLIST:
use_skip = index == music_player.track_index
if index <= music_player.track_index: # Shift track index down
music_player.track_index -= 1
if use_skip: # Skip track due to removing the current track
music_player._skip_track()
await music_player.update_interface(notification_text=response)
return Response(
embed=discord.Embed(description=response),
message_type=MessageTypes.REPLACE if use_player_interface else MessageTypes.NORMAL,
delete_after=autodelete if use_player_interface else None,
extra=autodelete if use_player_interface else None)
async def _build_tracklist(bot, guild, tracklist):
header = (
'# Tracklist generated: {3[1]} {3[0]}\r\n'
'# Guild: {0}\r\n'
'# Total tracks: {1}\r\n'
'# Runtime: {2}\r\n'
).format(
guild.name, len(tracklist),
utilities.get_time_string(sum(it.duration for it in tracklist), text=True, full=True),
utilities.get_timezone_offset(
bot, guild_id=guild.id, utc_dt=datetime.utcnow(), as_string=True))
tracklist_text_list = [header]
template = (
'{}: |\r\n'
' {}\r\n' # Title
' {}\r\n' # URL
' Added by {} at {} {}\r\n' # Info
' Duration: {} ID|Timestamp: {}|{}\r\n' # Duration, internal info
)
all_guild_members = await guild.fetch_members(limit=None).flatten()
for index, track in enumerate(tracklist):
track_author = (
(await data.fetch_member(bot, track.userid, safe=True, search=all_guild_members)) or
'Unknown')
offset, upload_time = utilities.get_timezone_offset(
bot, guild_id=guild.id, utc_seconds=track.timestamp, as_string=True)
upload_time_text = time.strftime('%H:%M %m/%d/%Y', time.gmtime(upload_time))
tracklist_text_list.append(template.format(
index + 1, track.title, track.url, track_author, upload_time_text, offset,
utilities.get_time_string(track.duration), track.userid, track.timestamp))
return '\r\n'.join(tracklist_text_list)
async def format_tracklist(bot, context):
music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)
# Format tracklist into user-friendly yaml
tracklist = _get_tracklist(bot, context.guild)
if not tracklist:
raise CBException("The playlist queue is empty.", autodelete=autodelete)
tracklist_string = await _build_tracklist(bot, context.guild, tracklist)
tracklist_file = utilities.get_text_as_file(tracklist_string)
if use_player_interface:
url = await utilities.upload_to_discord(bot, tracklist_file, filename='tracklist.txt')
await music_player.update_interface(
notification_text='[Click here]({}) to download the current tracklist'.format(url))
return Response(content='Tracklist file updated.', delete_after=5)
else:
return Response(
content='Tracks:', file=discord.File(tracklist_file, filename='tracklist.txt'))
async def import_tracklist(bot, context):
music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)
use_youtube_playlist = 'youtube' in context.options
if not (bool(context.message.attachments) ^ use_youtube_playlist):
raise CBException(
"Must include an attachment or a YouTube playlist URL.", autodelete=autodelete)
if not data.has_custom_role(bot, __name__, 'dj', member=context.author):
raise CBException("You must be a DJ to import tracks.")
if use_player_interface:
raise CBException(
'The player must be stopped before importing tracks.', autodelete=autodelete)
data.add(bot, __name__, 'import_lock', True, guild_id=context.guild.id, volatile=True)
try:
# Get tracklist data from playlist URL
if use_youtube_playlist:
downloader = YoutubeDL()
info = await utilities.future(
downloader.extract_info, context.options['youtube'], download=False)
# tracklist_data = list(it['webpage_url'] for it in info['entries'])
tracklist_data = info['entries']
# Get tracklist data from file
else:
use_youtube_playlist = False
file_url = context.message.attachments[0].url
tracklist_file = await utilities.download_url(bot, file_url, use_fp=True)
tracklist_data = yaml.safe_load(tracklist_file)
if isinstance(tracklist_data, str): # Read lines instead
tracklist_file.seek(0)
tracklist_blob = tracklist_file.read().decode('utf8').replace('\r\n', '\n').strip()
tracklist_data = tracklist_blob.split('\n')
logger.debug("Tracklist data: %s", tracklist_data)
if not tracklist_data:
raise CBException("The tracklist file is empty.")
elif len(tracklist_data) > 100:
raise CBException("Cannot import more than 100 tracks at a time.")
except Exception as e:
data.remove(bot, __name__, 'import_lock', guild_id=context.guild.id, volatile=True)
if isinstance(e, BotException):
raise e
else:
raise CBException("Failed to load the tracklist file.", e=e)
return Response(
content="Importing tracks...",
message_type=MessageTypes.ACTIVE,
extra=(tracklist_data, use_youtube_playlist),
extra_function=_import_tracklist_status)
async def _import_tracklist_status(bot, context, response):
last_update_time = time.time()
total_imported = 0
tracklist_data, use_youtube_playlist = response.extra
async def _update_notification(last_update_time):
if time.time() - last_update_time > 5:
await response.message.edit(content="Importing tracks... [ {} / {} ]".format(
total_imported, len(tracklist_data)))
return time.time()
return last_update_time
try:
if use_youtube_playlist:
for info in tracklist_data:
await _add_track_to_db(
bot, context.guild, info['webpage_url'], info,
context.author.id, int(time.time()))
total_imported += 1
last_update_time = await _update_notification(last_update_time)
else:
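# File import: each entry is either a multi-line block in the exported tracklist format
# (title, URL and info lines) or a single line treated as a URL or search query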
if isinstance(tracklist_data, list):
tracklist_data = OrderedDict((it[0], it[1]) for it in enumerate(tracklist_data))
for _, track_blob in sorted(tracklist_data.items()):
cleaned = track_blob.strip()
if not cleaned:
continue
elif '\n' in cleaned:
title, url, _, info, _ = track_blob.split('\n')
user_id, _, timestamp = info.split()[3].partition('|')
else:
title = url = track_blob
user_id, timestamp = context.author.id, time.time()
_check_total_tracks_limits(bot, context.author)
await _add_track_with_url(bot, context.guild, url, int(user_id), int(timestamp))
total_imported += 1
last_update_time = await _update_notification(last_update_time)
except Exception as e:
data.remove(bot, __name__, 'import_lock', guild_id=context.guild.id, volatile=True)
try:
raise CBException("Failed to import track {}".format(title), e=e)
except NameError:
raise CBException("Failed to import tracks.", e=e)
data.remove(bot, __name__, 'import_lock', guild_id=context.guild.id, volatile=True)
await response.message.edit(content="Imported {} track{}.".format(
total_imported, '' if total_imported == 1 else 's'))
async def get_info(bot, context):
"""Gets the information for the given track in the playlist."""
music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)
tracklist = _get_tracklist(bot, context.guild)
if not tracklist:
raise CBException("The playlist queue is empty.", autodelete=autodelete)
index = context.arguments[0] - 1
if not 0 <= index < len(tracklist):
raise CBException("Invalid index. Must be between 1 and {} inclusive.".format(
len(tracklist)), autodelete=autodelete)
track_info = tracklist[index]
title = _truncate_title(track_info.title)
time_ago = time.time() - track_info.timestamp
added_by_text = "Added by <@{}> {} ago.".format(
track_info.userid, utilities.get_time_string(time_ago, text=True))
duration_text = "Duration: ({})".format(utilities.get_time_string(track_info.duration))
response = "Info for track {}:".format(index + 1)
if use_player_interface: # Add notification
track_link = _build_hyperlink(bot, track_info)
info_text = "{}\n{}\n{}\n{}".format(response, track_link, duration_text, added_by_text)
music_player.page = int(index / 5)
await music_player.update_interface(notification_text=info_text, ignore_ratelimit=True)
return Response(message_type=MessageTypes.REPLACE, extra=autodelete)
else:
response += "\n{}\n{}\n{}\n{}".format(title, track_info.url, duration_text, added_by_text)
return Response(content=response)
async def set_volume(bot, context):
music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)
# Check control restriction
is_dj = data.has_custom_role(bot, __name__, 'dj', member=context.author)
control = data.get(
bot, __name__, 'control', guild_id=context.guild.id, default=Control.PARTIAL)
if not is_dj and control != Control.ALL:
raise CBException("You must be a DJ to change the volume.", autodelete=autodelete)
volume = context.arguments[0]
data.add(bot, __name__, 'volume', volume, guild_id=context.guild.id)
if use_player_interface:
music_player.update_config()
await music_player.update_interface(
notification_text='<@{}> set the volume to {:.2f}%'.format(
context.author.id, volume * 100))
return Response(
content="Volume set to {:.2f}%.".format(volume * 100),
message_type=MessageTypes.REPLACE if use_player_interface else MessageTypes.NORMAL,
delete_after=autodelete if use_player_interface else None,
extra=autodelete if use_player_interface else None)
async def configure_player(bot, context):
music_player, use_player_interface, autodelete = await _check_active_player(
bot, context.guild, autodelete_time=10)
options = context.options
if use_player_interface:
if 'switchmode' in options:
raise CBException(
"Cannot switch player modes while it is active.", autodelete=autodelete)
elif 'channel' in options:
raise CBException(
"Cannot set text channel while the player is active.", autodelete=autodelete)
guild_id = context.guild.id
changes = []
is_dj = data.has_custom_role(bot, __name__, 'dj', member=context.author)
is_mod = context.elevation > 0
dj_prereq = "You must be a DJ in order to "
mod_prereq = "You must be a bot moderator in order to "
if 'threshold' in options:
if not is_dj:
raise CBException(dj_prereq + "change the length threshold.")
threshold = options['threshold']
data.add(bot, __name__, 'threshold', threshold, guild_id=guild_id)
changes.append('Duration threshold set to {} seconds.'.format(threshold))
if 'cutoff' in options:
if not is_dj:
raise CBException(dj_prereq + "change the length cutoff.")
cutoff = options['cutoff']
data.add(bot, __name__, 'cutoff', cutoff, guild_id=guild_id)
changes.append('Cutoff set to {} seconds.'.format(cutoff))
if 'usertracks' in options:
if not is_dj:
raise CBException(dj_prereq + "change the user track limit.")
limit = options['usertracks']
data.add(bot, __name__, 'user_track_limit', limit, guild_id=guild_id)
changes.append('User track limit set to {} track(s).'.format(limit))
if 'totaltracks' in options:
if not is_dj:
raise CBException(dj_prereq + "change the total track limit.")
limit = options['totaltracks']
data.add(bot, __name__, 'total_track_limit', limit, guild_id=guild_id)
changes.append('Total track limit set to {} track(s).'.format(limit))
if 'djrole' in options:
if not is_mod:
raise CBException(mod_prereq + "change the DJ role.")
dj_role = options['djrole']
data.add_custom_role(bot, __name__, 'dj', dj_role)
changes.append('Set the DJ role to {}.'.format(dj_role.mention))
if 'channel' in options:
if not is_mod:
raise CBException(mod_prereq + "change the player channel.")
text_channel = options['channel']
data.add(bot, __name__, 'channel', text_channel.id, guild_id=guild_id)
changes.append('Set the text channel restriction to {}.'.format(text_channel.mention))
if 'switchcontrol' in options:
if not is_mod:
raise CBException(mod_prereq + "cycle control modes.")
control = data.get(bot, __name__, 'control', guild_id=guild_id, default=Control.PARTIAL)
control = 0 if control == len(Control) - 1 else control + 1
data.add(bot, __name__, 'control', control, guild_id=guild_id)
changes.append('Cycled the playlist permissions control mode to: {}'.format(
('Public', 'Partially public', 'DJs only')[control]))
if 'switchmode' in options:
if not is_mod:
raise CBException(mod_prereq + "cycle player modes.")
mode = data.get(bot, __name__, 'mode', guild_id=guild_id, default=Modes.QUEUE)
mode = 0 if mode == len(Modes) - 1 else mode + 1
data.add(bot, __name__, 'mode', mode, guild_id=guild_id)
changes.append('Cycled the playlist mode to: {}'.format(('Playlist', 'Queue')[mode]))
if 'mirrorchat' in options:
if not is_mod:
raise CBException(mod_prereq + "toggle chat mirroring.")
mirror = not data.get(bot, __name__, 'mirror_chat', guild_id=guild_id, default=False)
data.add(bot, __name__, 'mirror_chat', mirror, guild_id=guild_id)
changes.append('{}abled chat mirroring.'.format('En' if mirror else 'Dis'))
if 'autodisconnect' in options:
if not is_mod:
raise CBException(mod_prereq + "toggle automatic disconnecting.")
auto_disconnect = not data.get(
bot, __name__, 'auto_disconnect', guild_id=guild_id, default=False)
data.add(bot, __name__, 'auto_disconnect', auto_disconnect, guild_id=guild_id)
changes.append('{}abled auto disconnecting.'.format('En' if auto_disconnect else 'Dis'))
# Defaults
default_threshold = configurations.get(bot, __name__, key='max_threshold')
default_cutoff = configurations.get(bot, __name__, key='max_cutoff')
default_total_track_limit = configurations.get(bot, __name__, key='max_total_track_limit')
default_user_track_limit = configurations.get(bot, __name__, key='max_user_track_limit')
# Format and display all settings
threshold = data.get(bot, __name__, 'threshold', guild_id=guild_id, default=default_threshold)
cutoff = data.get(bot, __name__, 'cutoff', guild_id=guild_id, default=default_cutoff)
total_track_limit = data.get(
bot, __name__, key='total_track_limit',
guild_id=guild_id, default=default_total_track_limit)
user_track_limit = data.get(
bot, __name__, key='user_track_limit',
guild_id=guild_id, default=default_user_track_limit)
dj_role = data.get_custom_role(bot, __name__, 'dj', context.guild)
control = data.get(bot, __name__, 'control', guild_id=guild_id, default=Control.PARTIAL)
mode = data.get(bot, __name__, 'mode', guild_id=guild_id, default=Modes.QUEUE)
chat_mirroring = data.get(bot, __name__, 'mirror_chat', guild_id=guild_id, default=False)
auto_disconnect = data.get(bot, __name__, 'auto_disconnect', guild_id=guild_id, default=False)
text_channel_id = data.get(bot, __name__, 'channel', guild_id=guild_id)
text_channel = context.guild.get_channel(text_channel_id)
embed = discord.Embed(
title='Player configuration', description=(
'Text channel: {}\nTotal track limit: {}\n'
'User track limit: {}\nThreshold: {}\nCutoff: {}\n'
'DJ Role: {}\nControl: {}\nPlayer mode: {}\n'
'Chat mirroring: {}\nAutomatic disconnecting: {}'.format(
text_channel.mention if text_channel else 'None',
'{} tracks'.format(total_track_limit),
'{} tracks'.format(user_track_limit),
'{} seconds'.format(threshold),
'{} seconds'.format(cutoff),
dj_role.mention if dj_role else 'None',
('Public', 'Partially public', 'DJs only')[control],
('Repeating playlist', 'Single play queue')[mode],
chat_mirroring,
auto_disconnect)
)
)
if changes:
embed.add_field(name="Changes", value='\n'.join(changes))
if use_player_interface:
music_player.update_config()
await music_player.update_interface('{}:\n{}'.format(
context.author.mention, '\n'.join(changes)))
return Response(
embed=embed,
message_type=MessageTypes.REPLACE if use_player_interface else MessageTypes.NORMAL,
delete_after=autodelete if use_player_interface else None,
extra=autodelete if use_player_interface else None)
async def clear_playlist(bot, context):
music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)
if use_player_interface:
raise CBException(
"Cannot clear playlist tracks when the player is active.", autodelete=autodelete)
return Response(
content="Say 'yes' to confirm clearning the playlist.",
message_type=MessageTypes.WAIT,
extra_function=_confirm_clear_playlist,
extra={
'event': 'message',
'kwargs': {
'timeout': 30, # Default 300
'check': lambda m: m.author == context.author,
}
}
)
async def _confirm_clear_playlist(bot, context, response, result):
"""Menu for confirming a playlist clear."""
if result is None: # Timed out
edit = 'Playlist clear timed out.'
elif result.content.lower() == 'yes':
# music_player = _get_music_player(bot, context.guild)
_, use_player_interface, autodelete = await _check_active_player(bot, context.guild)
if use_player_interface:
raise CBException(
"Cannot clear playlist tracks when the player is active.", autodelete=autodelete)
data.db_drop_table(bot, 'playlist', table_suffix=context.guild.id, safe=True)
edit = 'Playlist has been cleared.'
else:
edit = 'Playlist clear cancelled.'
await response.message.edit(content=edit)
async def skip_to_page(bot, context):
"""Skips to a certain page of the tracklist in the player interface."""
music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)
if not use_player_interface:
raise CBException("The player interface must be active.")
# Check page number
tracklist = music_player.tracklist
page_number = context.arguments[0] - 1
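# The interface shows 5 tracks per page, so (len + 4) / 5 rounds up to the page count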
total_pages = max(int((len(tracklist) + 4) / 5), 1)
if not 0 <= page_number <= total_pages - 1:
raise CBException(
"Invalid page number. Must be between 1 and {} inclusive.".format(total_pages),
autodelete=autodelete)
music_player.page = page_number
await music_player.update_interface(ignore_ratelimit=True)
return Response(message_type=MessageTypes.REPLACE, extra=1)
async def swap_tracks(bot, context):
"""Swaps the given two tracks in the playlist."""
music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)
# Check control restriction
control = data.get(
bot, __name__, 'control', guild_id=context.guild.id, default=Control.PARTIAL)
is_dj = data.has_custom_role(bot, __name__, 'dj', member=context.author)
if not is_dj and control != Control.ALL:
raise CBException("You must be a DJ to swap tracks.", autodelete=autodelete)
# Check index validity
tracklist = _get_tracklist(bot, context.guild)
swap = []
for index in context.arguments:
if not 1 <= index <= len(tracklist):
raise CBException(
"Index must be between 1 and {}".format(len(tracklist)),
autodelete=autodelete)
swap.append(tracklist[index - 1])
# Swap tracks
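# Each track's fields are written into the other row's id; for the two-element swap list,
# swap[index - 1] maps index 0 to the second track and index 1 to the first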
set_arg = (
'(url, downloadurl, title, duration, userid, timestamp, extra) = '
'(%s, %s, %s, %s, %s, %s, %s)')
for index, track in enumerate(swap):
data.db_update(
bot, 'playlist', table_suffix=context.guild.id,
set_arg=set_arg, where_arg='id=%s', input_args=[
track.url, track.downloadurl, track.title, track.duration, track.userid,
track.timestamp, Json(track.extra), swap[index - 1].id])
# Add notification and skip track if necessary
response = '{} swapped tracks {} and {}'.format(context.author.mention, *context.arguments)
if use_player_interface:
music_player.update_tracklist()
if music_player.track_index + 1 in context.arguments:
asyncio.ensure_future(music_player.play(track_index=music_player.track_index))
await music_player.update_interface(notification_text=response, ignore_ratelimit=True)
return Response(message_type=MessageTypes.REPLACE, extra=autodelete)
else:
return Response(content=response)
async def _check_player_restrictions(
bot, context, music_player, use_player_interface, autodelete):
"""Ensures that the user in the context can interact with the player."""
# Channel restriction checks
channel_restriction_id = data.get(bot, __name__, 'channel', guild_id=context.guild.id)
if channel_restriction_id not in [it.id for it in context.guild.channels]:
raise CBException(
"The music player does not have an assigned text channel. Please see "
"`{}help playlist configure` for more information.".format(
utilities.get_invoker(bot, guild=context.guild)))
if channel_restriction_id != context.channel.id:
channel_restriction = data.get_channel(bot, channel_restriction_id, guild=context.guild)
raise CBException(
"The music player must be used in the assigned text channel, {}.".format(
channel_restriction.mention))
# Voice channel checks
if not context.author.voice:
raise CBException(
"You must be in a voice channel to use the player.", autodelete=autodelete)
voice_channel = context.author.voice.channel
if use_player_interface and music_player.voice_channel != voice_channel:
raise CBException(
"You must be in the same voice channel as the bot.", autodelete=autodelete)
elif use_player_interface and music_player.state == States.LOADING:
raise CBException("Playlist is loading, please wait.", autodelete=autodelete)
async def control_player(bot, context):
"""Basic control of the player (like pausing/stopping/skipping etc."""
music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)
if len(context.options) != 2:
raise CBException("Only one action must be provided.", autodelete=autodelete)
if not use_player_interface:
raise CBException("The music player is not active.")
await _check_player_restrictions(bot, context, music_player, use_player_interface, autodelete)
is_dj = data.has_custom_role(bot, __name__, 'dj', member=context.author)
permissions = music_player.control == Control.ALL or is_dj
try:
action = "[Unknown]"
if 'next' in context.options or 'skip' in context.options:
action = 'skip the current track'
assert permissions or music_player.control == Control.PARTIAL
result = await music_player.track_navigate(True, context.author)
if not result: # Add to vote skip list instead
if (music_player.state == States.PLAYING and
context.author.id not in music_player.skip_voters):
action += '. Voting to skip instead'
music_player.skip_voters.append(context.author.id)
music_player.update_listeners()
assert False
elif 'resume' in context.options:
action = 'resume the player'
assert permissions or music_player.autopaused or music_player.first_time_startup
asyncio.ensure_future(music_player.play(author=context.author))
else:
if 'pause' in context.options:
action = 'pause the player'
assert permissions
asyncio.ensure_future(music_player.pause(author=context.author))
elif 'stop' in context.options:
action = 'stop the player'
assert permissions
asyncio.ensure_future(music_player.stop(
text="The player has been stopped by {}.".format(context.author.mention)))
elif 'previous' in context.options:
action = 'skip to the previous track'
assert permissions
asyncio.ensure_future(music_player.track_navigate(False, context.author))
except AssertionError:
raise CBException(
"You have insufficient permissions to {}.".format(action),
autodelete=autodelete)
# Delete message
return Response(message_type=MessageTypes.REPLACE, extra=1)
async def setup_player(bot, context):
"""Starts the player interface and starts playing a track if selected."""
music_player, use_player_interface, autodelete = await _check_active_player(bot, context.guild)
await _check_player_restrictions(bot, context, music_player, use_player_interface, autodelete)
use_play_command = context.subcommand.id == 'play'
if use_play_command and (context.arguments[0] and 'track' in context.options):
raise CBException(
"Cannot supply the track and query paramters at the same time.",
autodelete=autodelete)
# Validate the track index, if one was given
# Get mode from persistent data because the player may not exist yet
track_index = None
track = None
adding_track = False
if use_play_command:
if 'track' in context.options: # Play track index
track_index = context.options['track']
tracklist = _get_tracklist(bot, context.guild)
if not 0 < track_index <= len(tracklist):
raise CBException(
"Track index must be between 1 and {} inclusive.".format(len(tracklist)),
autodelete=autodelete)
track_index -= 1
track = tracklist[track_index]
elif context.arguments[0]: # Query given (add track)
adding_track = True
add_track_response = await add_track(bot, context)
await bot.handle_response(context.message, add_track_response, context=context)
await _check_player_restrictions(
bot, context, music_player, use_player_interface, autodelete
)
# Check autoplay permissions
use_autoplay = False
if use_play_command:
is_dj = data.has_custom_role(bot, __name__, 'dj', member=context.author)
control_type = data.get(
bot, __name__, 'control', guild_id=context.guild.id, default=Control.PARTIAL)
use_autoplay = (
control_type == Control.ALL or is_dj or
(control_type == Control.PARTIAL and
(not music_player or music_player.first_time_startup)))
# Setup new player
if music_player is None or music_player.state == States.STOPPED:
logger.debug("Creating new music player.")
music_player = MusicPlayer(
bot, context.message, autoplay=use_autoplay, track_index=track_index)
data.add(
bot, __name__, 'music_player', music_player, guild_id=context.guild.id, volatile=True)
# Update player message or change tracks
else:
if use_autoplay and track_index is not None:
music_player.notification = '{} skipped to {}'.format(
context.author.mention, _build_track_details(bot, track, track_index))
play_track = bool(
use_autoplay and (music_player.state == States.PAUSED or track_index is not None))
# Check if messages can just be replaced
message_history = await context.channel.history(limit=3).flatten()
message_ids = list(it.id for it in message_history)
if (len(message_history) > 2 and music_player.message.id in message_ids and
not context.subcommand.id == 'show'):
if play_track:
asyncio.ensure_future(music_player.play(
track_index=track_index, author=context.author))
else:
await music_player.set_new_message(
context.message, autoplay=use_autoplay if play_track else None,
track_index=track_index)
# Delete any immediate play/skip commands, but keep track add messages.
if not adding_track:
return Response(message_type=MessageTypes.REPLACE)
```
#### File: JshBot-plugins/ude/ude.py
```python
import json
import datetime
import discord
from jshbot import utilities, configurations, plugins, logger
from jshbot.exceptions import ConfiguredBotException
from jshbot.commands import Command, Response
__version__ = '0.1.0'
CBException = ConfiguredBotException('Emoji updater')
uses_configuration = True
@plugins.command_spawner
def get_commands(bot):
return [Command('ude', elevated_level=3, hidden=True)]
async def get_response(bot, context):
if 'discrank.py' not in bot.plugins:
raise CBException("Discrank plugin not detected.")
discrank_plugin = bot.plugins['discrank.py']
champions, spells = discrank_plugin.CHAMPIONS, discrank_plugin.SPELLS
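# Collect the custom emojis from every configured guild into one flat list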
chunks = [bot.get_guild(it).emojis for it in configurations.get(bot, __name__, 'guilds')]
emojis = [it for chunk in chunks for it in chunk]
final = {
'champions': {'id': {}, 'name': {}},
'spells': {'id': {}, 'name': {}},
'bdt': {'blue': {}, 'red': {}}
}
for emoji in emojis:
if emoji.name.startswith('Champion'):
clean_name = emoji.name.split('_')[1].lower()
if clean_name not in champions:
raise CBException("Champion {} not found.".format(clean_name))
item_id = champions[clean_name]['id']
final['champions']['id'][str(item_id)] = str(emoji)
final['champions']['name'][clean_name] = str(emoji)
elif emoji.name.startswith('Spell'):
clean_name = emoji.name.split('_')[1].lower()
if clean_name not in spells:
raise CBException("Spell {} not found.".format(clean_name))
item_id = spells[clean_name]['id']
final['spells']['id'][str(item_id)] = str(emoji)
final['spells']['name'][clean_name] = str(emoji)
elif emoji.name.startswith(('Red', 'Blue')):
color, name = emoji.name.split('_')
final['bdt'][color.lower()][name.lower()] = str(emoji)
else:
raise CBException("Invalid emoji detected: {}".format(emoji.name))
final_json = json.dumps(final, sort_keys=True, indent=4)
json_file = utilities.get_text_as_file(final_json)
file_url = await utilities.upload_to_discord(
bot, json_file, filename='lol_emojis.json', close=True)
embed = discord.Embed(
description='[Click here to download]({})'.format(file_url),
colour=discord.Colour(0x4CAF50),
timestamp=datetime.datetime.utcnow())
embed.set_footer(text="Updated")
try:
update_channel = bot.get_channel(configurations.get(bot, __name__, 'update_channel'))
message_id = configurations.get(bot, __name__, 'update_message')
update_message = await update_channel.fetch_message(message_id)
await update_message.edit(content='', embed=embed)
except Exception as e:
raise CBException("Failed to edit the update message.", e=e)
return Response(content="Updated!")
``` |
{
"source": "jkchen2/riotapichallenge2k16",
"score": 3
} |
#### File: riotapichallenge2k16/jshbot/commands.py
```python
import discord
import asyncio
import logging
import sys
from jshbot.exceptions import BotException, ErrorTypes
EXCEPTION = 'Commands'
def convert_plans(plans):
'''
Converts user-friendly(ish) plans into the system-friendly version.
Convert: "?opt1 opt2: ::+"
To: [[(True, "opt1", False), (False, "opt2", True)], '::+']
Convert: "*"
To: [[], '*']
'''
new_plans = []
required = True
argument = False
for plan in plans: # Convert each individual plan
split = plan.split()
new_plan = [[], '']
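# new_plan holds [(required, option_name, takes_argument), ...] plus the trailing argument spec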
for block in split: # Parse each option
if block[0] in (':', '^', '&', '+', '#'): # Last part
new_plan[1] = block
break
required = block[0] == '?'
argument = block[-1] == ':'
block = block.strip('?').strip(':')
new_plan[0].append((required, block, argument))
new_plans.append(new_plan)
return new_plans
def add_commands(bot, new_commands, plugin):
'''
Checks that all keys in the new dictionary are unique from those in the old
dictionary. If all keys are good, add them to the bot commands dictionary.
'''
# No shortcuts
if not new_commands:
return
# Check that there are no command name collisions
for key in new_commands:
is_shortcut = type(new_commands[key][0]) is str
if key in bot.commands:
raise BotException(ErrorTypes.FATAL, EXCEPTION,
"Attempting to add a command that already exists", key)
if is_shortcut:
bot.commands[key] = new_commands[key]
else:
new_plans = convert_plans(new_commands[key][0]) # Convert and add
bot.commands[key] = ((new_plans, new_commands[key][1]), plugin)
def add_manual(bot, manual):
'''
Adds the manual entries to the bot manual dictionary.
'''
# Do practically the same thing for manual entries
if not manual: # No manual entry :c
return
for key in manual:
if key in bot.manual:
raise BotException(ErrorTypes.FATAL, EXCEPTION,
"Attempting to add a manual entry that already exists", key)
else:
bot.manual[key] = manual[key]
def get_command_pair(bot, base):
'''
Returns a tuple of the command pair with the given base and whether or not
it is a shortcut.
'''
try:
is_shortcut = type(bot.commands[base][0]) is str
if is_shortcut:
command_pair = bot.commands[base]
else:
command_pair = bot.commands[base][0]
return (command_pair, is_shortcut)
except KeyError:
return (None, None)
async def execute(bot, message, parsed_command):
'''
Gets the proper response for the parsed command by first getting the plugin,
then calling the get_response function associated with that plugin.
'''
# Get plugin
base = parsed_command[0]
plugin_name = bot.commands[base][1]
plugin = bot.plugins[plugin_name][0]
direct = message.channel.is_private
# Execute plugin's get_response
return await (plugin.get_response(bot, message, parsed_command, direct))
```
#### File: riotapichallenge2k16/jshbot/core.py
```python
import asyncio
import discord
import logging
import os.path
import time
import sys
import os
# Debug
import traceback
from jshbot import configurations, plugins, commands, servers, parser, data
from jshbot.exceptions import ErrorTypes, BotException
EXCEPTION = 'Core'
class Bot(discord.Client):
def __init__(self, debug):
self.version = '0.3.0-alpha'
self.date = 'May 9th, 2016'
self.time = int(time.time())
self.readable_time = time.strftime('%c')
self.debug = debug
if self.debug:
logging.debug("=== Starting up JshBot {} ===".format(self.version))
logging.debug("=== Time: {} ===".format(self.readable_time))
else:
print("=== Starting up JshBot {} ===".format(self.version))
print("=== Time: {} ===".format(self.readable_time))
super().__init__()
self.path = os.path.split(os.path.realpath(__file__))[0][:-7]
logging.debug("Setting directory to {}".format(self.path));
logging.debug("Loading plugins and commands...")
self.commands = {} # Set by get_plugins
self.manual = {} # Set by get_plugins
self.data = {} # Set by individual plugins
self.plugins = plugins.get_plugins(self)
self.directories = data.get_directories(self)
logging.debug("Loading configurations...")
self.configurations = configurations.get_configurations(self)
logging.debug("Loading server data...")
self.servers_data = servers.get_servers_data(self)
# Extras
self.edit_dictionary = {}
def interrupt_say(self, channel_id, message, channel=None):
'''
Allows plugins to send messages without having to return directly from
get_response. This should mostly be avoided, and just used for errors
or other immediately relevant notifications.
'''
if not channel:
try:
channel = discord.utils.get(
self.get_all_channels(), id=channel_id)
except:
raise BotException(ErrorTypes.RECOVERABLE, EXCEPTION,
"Server {} could not be found.".format(server_id))
asyncio.ensure_future(self.send_message(channel, message))
def get_token(self):
return self.configurations['core']['token']
def usage_reminder(self, base):
'''
Uses the base module to get the usage reminder for a command.
'''
base_module = self.plugins['base'][0]
return base_module.get_usage_reminder(self, base)
def can_respond(self, message):
'''
Determines whether or not the bot can respond to the given message.
Checks that the message has text, matches an invoker, and that the
server/channel/user is not muted or blocked. Admins/moderators override.
If the message is a direct message, respond if there is a valid invoker.
'''
# Ignore empty messages and messages by bots
if (not message.content or message.author.bot or
message.author.id == self.user.id):
return False
# Bot responds to mentions only
if self.configurations['core']['mention_mode']:
if (not message.content.startswith(self.user.mention) or
len(message.content.split(' ', 1)) == 1):
return False
# Any command invoker will do
else:
if (message.content[0] not in
self.configurations['core']['command_invokers'] and
not message.content.startswith(self.user.mention)):
return False
# Respond to direct messages
if message.channel.is_private:
return True
author_id = message.author.id
server_data = self.servers_data[message.server.id]
try:
# Owners/moderators override everything
channel_id = message.channel.id
if ((author_id in self.configurations['core']['owners']) or
(author_id in server_data['moderators'])):
return True
# Server/channel muted, or user is blocked
if ((server_data['muted']) or
(channel_id in server_data['muted_channels']) or
(author_id in server_data['blocked'])):
return False
except KeyError as e: # Bot may not have updated fast enough
logging.warn("Failed to find server in can_respond(): " + str(e))
servers.check_all(self)
time.sleep(5) # remove later
return self.can_respond(message)
return True # Clear to respond
async def on_message(self, message):
plugins.broadcast_event(self, 2, message)
# Ensure bot can respond properly
if not self.can_respond(message):
return
# Ensure command is valid
if message.content.startswith(self.user.mention):
split_content = message.content.split(' ', 2)[1:]
else:
split_content = message.content[1:].split(' ', 1)
if len(split_content) == 1: # No spaces
split_content.append('')
base, parameters = split_content
command_pair, shortcut = commands.get_command_pair(self, base)
if not command_pair: # Suitable command not found
logging.debug("Suitable command not found: " + base)
return
# Bot is clear to get response. Send typing to signify
if self.configurations['core']['send_typing']:
await self.send_typing(message.channel)
# Parse command and reply
try:
print(message.author.name + ': ' + message.content)
parsed_command = parser.parse(
self, base, parameters, command_pair, shortcut)
print('\t' + str(parsed_command))
response = await (commands.execute(self, message, parsed_command))
except BotException as e: # Respond with error message
response = (str(e), False, 0, None)
except Exception as e: # General error
logging.error(e)
traceback.print_exc()
error = 'Uh oh. The bot encountered an exception: {0}: {1}'.format(
type(e).__name__, e)
response = (error, False, 0, None)
message_reference = await self.send_message(
message.channel, response[0], tts=response[1])
# A response looks like this:
# (text, tts, message_type, extra)
# message_type can be:
# 0 - normal
# 1 - permanent
# 2 - terminal (deletes itself after 'extra' seconds)
# 3 - active (pass the reference back to the plugin to edit)
# If message_type is >= 1, do not add to the edit dictionary
# TODO: Add normal message response to the edit dictionary
if response[2] == 2: # Terminal
await asyncio.sleep(response[3])
await self.delete_message(message_reference)
async def on_ready(self):
plugins.broadcast_event(self, 0)
# Make sure server data is ready
servers.check_all(self)
if self.debug:
logging.debug("=== {} online ===".format(self.user.name))
else:
print("=== {} online ===".format(self.user.name))
async def on_error(self, event, *args, **kwargs):
plugins.broadcast_event(self, 1, event, *args, **kwargs)
async def on_socket_raw_receive(self, msg):
plugins.broadcast_event(self, 3, msg)
async def on_socket_raw_send(self, payload):
plugins.broadcast_event(self, 4, payload)
async def on_message_delete(self, message):
plugins.broadcast_event(self, 5, message)
async def on_message_edit(self, before, after):
plugins.broadcast_event(self, 6, before, after)
async def on_channel_delete(self, channel):
plugins.broadcast_event(self, 7, channel)
async def on_channel_create(self, channel):
plugins.broadcast_event(self, 8, channel)
async def on_channel_update(self, before, after):
plugins.broadcast_event(self, 9, before, after)
async def on_member_join(self, member):
plugins.broadcast_event(self, 10, member)
async def on_member_update(self, before, after):
plugins.broadcast_event(self, 11, before, after)
async def on_server_join(self, server):
plugins.broadcast_event(self, 12, server)
async def on_server_remove(self, server):
plugins.broadcast_event(self, 13, server)
async def on_server_update(self, before, after):
plugins.broadcast_event(self, 14, before, after)
async def on_server_role_create(self, server, role):
plugins.broadcast_event(self, 15, server, role)
async def on_server_role_delete(self, server, role):
plugins.broadcast_event(self, 16, server, role)
async def on_server_role_update(self, before, after):
plugins.broadcast_event(self, 17, before, after)
async def on_server_available(self, server):
plugins.broadcast_event(self, 18, server)
async def on_server_unavailable(self, server):
plugins.broadcast_event(self, 19, server)
async def on_voice_state_update(self, before, after):
plugins.broadcast_event(self, 20, before, after)
async def on_member_ban(self, member):
plugins.broadcast_event(self, 21, member)
async def on_member_unban(self, server, user):
plugins.broadcast_event(self, 22, server, user)
async def on_typing(self, channel, user, when):
plugins.broadcast_event(self, 23, channel, user, when)
def save_data(self):
'''
Saves all data. For now, this will just be the servers file.
'''
logging.debug("Saving data...")
servers.save_data(self)
logging.debug("Saving data complete.")
def restart(self):
logging.debug("Attempting to restart the bot...")
self.save_data()
asyncio.ensure_future(self.logout())
os.system('python3.5 ' + self.path + '/start.py')
def shutdown(self):
logging.debug("Writing data on shutdown...")
self.save_data()
logging.debug("Closing down!")
asyncio.ensure_future(self.logout())
sys.exit()
def initialize(debug=False):
if debug:
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
bot = Bot(debug)
bot.run(bot.get_token())
logging.error("Bot disconnected. Shutting down...")
bot.shutdown()
```
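The comment block above documents the `(text, tts, message_type, extra)` response convention. As a rough illustration (not taken from the jshbot source), a helper that interprets such a tuple could look like the sketch below; `send_message` and `delete_message` stand in for the old discord.py client calls used in `on_message` and are assumed to exist on the client object.
```python
import asyncio

async def dispatch_response(client, channel, response):
    """Sketch: interpret a (text, tts, message_type, extra) response tuple."""
    text, tts, message_type, extra = response
    reference = await client.send_message(channel, text, tts=tts)
    if message_type == 2:   # terminal: delete the reply after 'extra' seconds
        await asyncio.sleep(extra)
        await client.delete_message(reference)
        return None
    if message_type == 3:   # active: hand the reference back for later edits
        return reference
    return None             # normal (0) and permanent (1) replies need no follow-up
```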
#### File: riotapichallenge2k16/jshbot/plugins.py
```python
import asyncio
import discord
import logging
import os.path
import importlib.util
import sys
# Debug
import traceback
from jshbot import core, configurations, commands
from jshbot.exceptions import ErrorTypes, BotException
EXCEPTION = 'Plugins'
def get_plugins(bot):
'''
Gets a list of all of the plugins and stores them as a key/value pair of
the plugin name and the module itself (renamed to plugin for the user).
'''
directory = bot.path + '/plugins'
try:
plugins_list = os.listdir(directory)
except:
raise BotException(ErrorTypes.STARTUP, EXCEPTION,
"Plugins directory not found")
valid_plugins = {}
# Add base plugin
from jshbot import base
command_pairs, shortcuts, manual = base.get_commands()
commands.add_commands(bot, command_pairs, 'base')
commands.add_commands(bot, shortcuts, 'base')
commands.add_manual(bot, manual)
valid_plugins['base'] = [base]
# Get plugin commands
for plugin in plugins_list:
if plugin[0] in ('.', '_') or plugin == 'base': # Dang swap files
continue
try:
spec = importlib.util.spec_from_file_location(
plugin, directory + '/{}'.format(plugin))
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
command_pairs, shortcuts, manual = module.get_commands()
commands.add_commands(bot, command_pairs, plugin)
commands.add_commands(bot, shortcuts, plugin)
commands.add_manual(bot, manual)
except Exception as e:
traceback.print_exc()
raise BotException(ErrorTypes.STARTUP, EXCEPTION,
"Failed to import external plugin", plugin, e=e)
else:
logging.debug("Adding plugin {}".format(plugin))
valid_plugins[plugin] = [module]
# Get functions to broadcast
events = ['on_ready', 'on_error', 'on_message', 'on_socket_raw_receive',
'on_socket_raw_send', 'on_message_delete', 'on_message_edit',
'on_channel_delete', 'on_channel_create', 'on_channel_update',
'on_member_join', 'on_member_update', 'on_server_join',
'on_server_remove', 'on_server_update', 'on_server_role_create',
'on_server_role_delete', 'on_server_role_update',
'on_server_available', 'on_server_unavailable',
'on_voice_state_update', 'on_member_ban', 'on_member_unban',
'on_typing']
for plugin_name, plugin in valid_plugins.items():
functions = []
for event in events:
functions.append(getattr(plugin[0], event, None))
valid_plugins[plugin_name].append(functions)
if len(valid_plugins):
logging.debug("Loaded {} plugin(s)".format(len(valid_plugins)))
return valid_plugins
def broadcast_event(bot, event_index, *args):
'''
Loops through all of the plugins and looks to see if the event index
specified is associated it. If it is, call that function with args.
'''
for plugin_name, plugin_pair in bot.plugins.items():
function = plugin_pair[1][event_index]
if function:
asyncio.ensure_future(function(bot, *args))
``` |
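`get_plugins` and `broadcast_event` together implement a simple event table: one optional handler slot per event name and plugin, dispatched concurrently when the event fires. A self-contained sketch of that pattern, with made-up plugin and event names, is shown below.
```python
import asyncio

EVENTS = ['on_ready', 'on_message']  # hypothetical, trimmed event list

class EchoPlugin:
    async def on_message(self, bot, message):
        print('echo:', message)

def collect_handlers(plugin):
    # One slot per event; missing handlers stay None, as in get_plugins()
    return [getattr(plugin, name, None) for name in EVENTS]

def broadcast(bot, plugins, event_index, *args):
    for handlers in plugins.values():
        handler = handlers[event_index]
        if handler:
            asyncio.ensure_future(handler(bot, *args))

async def main():
    plugins = {'echo': collect_handlers(EchoPlugin())}
    broadcast(None, plugins, EVENTS.index('on_message'), 'hello')
    await asyncio.sleep(0)  # give the scheduled handler a chance to run

asyncio.run(main())
```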
{
"source": "JKChenFZ/hclib",
"score": 2
} |
#### File: nexus/library/bundle.py
```python
from machines import Workstation,Job
from simulation import Simulation,NullSimulationInput,NullSimulationAnalyzer
class SimulationBundleInput(NullSimulationInput):
None
#end class SimulationBundleInput
class SimulationBundleAnalyzer(NullSimulationAnalyzer):
None
#end class SimulationBundleAnalyzer
class SimulationBundle(Simulation):
input_type = SimulationBundleInput
analyzer_type = SimulationBundleAnalyzer
generic_identifier = 'bundle'
image_directory = 'bundle'
preserve = Simulation.preserve & set(['sims'])
def __init__(self,*sims,**kwargs):
if len(sims)==1 and isinstance(sims[0],list):
sims = sims[0]
#end if
if len(sims)==0:
self.error('attempted to bundle 0 simulations\n at least one simulation must be provided to bundle')
#end if
for sim in sims:
if not isinstance(sim,Simulation):
self.error('attempted to bundle non-simulation object: '+sim.__class__.__name__)
#end if
#end for
relative_paths = False
if 'relative' in kwargs:
relative_paths = kwargs['relative']
del kwargs['relative']
#end if
self.sims = sims
self.bundle_jobs(relative=relative_paths)
self.system = None
if not 'path' in kwargs:
kwargs['path'] = self.sims[0].path
#end if
if not 'job' in kwargs:
kwargs['job'] = self.job
#end if
Simulation.__init__(self,**kwargs)
self.infile = None
if isinstance(self.job.get_machine(),Workstation):
self.outfile = None
self.errfile = None
#end if
self.bundle_dependencies()
#sims in bundle should not submit jobs
for sim in sims:
sim.skip_submit = True
#end for
#end def __init__
#def init_job(self):
# None # this is to override the default behavior of Simulation
##end def init_job
def bundle_dependencies(self):
deps = []
for sim in self.sims:
for d in sim.dependencies:
deps.append((d.sim,'other'))
#end for
#end for
self.depends(*deps)
#end def bundle_dependencies
def bundle_jobs(self,relative=False):
jobs = []
job0 = self.sims[0].job
time = Job.zero_time()
nodes = 0
cores = 0
threads = job0.threads
queue = job0.queue
same_threads = True
same_queue = True
machine_names = set()
for sim in self.sims:
job = sim.job
nodes += job.nodes
cores += job.cores
same_threads = same_threads and threads==job.threads
same_queue = same_queue and queue==job.queue
time = job.max_time(time)
machine = job.get_machine()
machine_names.add(machine.name)
jobs.append(job)
#end for
if not same_threads:
self.error('bundling jobs with different numbers of threads is not yet supported',trace=False)
#end if
if not same_queue:
self.error('bundling jobs with different queues is not allowed',trace=False)
#end if
machine_names = list(machine_names)
if len(machine_names)!=1:
self.error('attempted to bundle jobs across these machines: '+str(machine_names)+'\n jobs may only be bundled on the same machine',trace=False)
#end if
self.job = Job(
bundled_jobs = jobs,
relative = relative,
queue = queue,
nodes = nodes,
cores = cores,
threads = threads,
machine = machine_names[0],
**time
)
#end def bundle_jobs
def pre_write_inputs(self,save_image):
for sim in self.sims:
if not sim.setup:
sim.write_inputs(save_image)
#end if
#end for
#end def pre_write_inputs
def pre_send_files(self,enter):
for sim in self.sims:
if not sim.sent_files:
sim.send_files(enter)
#end if
#end for
#end def pre_send_files
def post_submit(self):
for sim in self.sims:
sim.submitted = True
#end for
#end def post_submit
def pre_check_status(self):
if self.job.finished:
for sim in self.sims:
sim.job.finished = True
#end for
#end if
for sim in self.sims:
sim.check_status()
#end for
#end def pre_check_status
def check_sim_status(self):
finished = True
for sim in self.sims:
finished = finished and sim.finished
#end for
self.finished = finished
#end def check_sim_status
def get_output_files(self):
return list()
#end def get_output_files
def app_command(self):
return None
#end def app_command
#end class SimulationBundle
def bundle(*sims,**kwargs):
return SimulationBundle(*sims,**kwargs)
#end def bundle
```
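`bundle_jobs` sums nodes and cores across the member jobs, keeps the longest wall time, and refuses to mix thread counts, queues, or machines. The standalone sketch below illustrates that aggregation rule with plain dicts; the field names and example machine are placeholders, not the nexus `Job` interface.
```python
# Sketch of the aggregation performed by SimulationBundle.bundle_jobs,
# using plain dicts instead of nexus Job objects.
def bundle_job_specs(jobs):
    first = jobs[0]
    bundled = {
        'nodes': 0,
        'cores': 0,
        'threads': first['threads'],
        'queue': first['queue'],
        'machine': first['machine'],
        'hours': 0,
    }
    for job in jobs:
        if job['threads'] != bundled['threads']:
            raise ValueError('bundling jobs with different thread counts is not supported')
        if job['queue'] != bundled['queue']:
            raise ValueError('bundling jobs with different queues is not allowed')
        if job['machine'] != bundled['machine']:
            raise ValueError('jobs may only be bundled on the same machine')
        bundled['nodes'] += job['nodes']
        bundled['cores'] += job['cores']
        bundled['hours'] = max(bundled['hours'], job['hours'])  # longest wall time wins
    return bundled

# Example: two 2-node jobs on the same machine become one 4-node request.
specs = bundle_job_specs([
    {'nodes': 2, 'cores': 32, 'threads': 1, 'queue': 'batch', 'machine': 'titan', 'hours': 2},
    {'nodes': 2, 'cores': 32, 'threads': 1, 'queue': 'batch', 'machine': 'titan', 'hours': 6},
])
print(specs['nodes'], specs['hours'])  # 4 6
```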
#### File: nexus/library/generic.py
```python
import sys
import traceback
from copy import deepcopy
from abilities import AllAbilities,genbase
exit_call = exit
class obj(AllAbilities):
logfile = sys.stdout
def copy(self):
return self._copy()
def copies(self,count):
return self._copies(count)
def iteritems(self):
return self._iteritems()
def keys(self):
return self._keys()
def values(self):
return self._values()
def inverse(self):
return self._inverse()
def set(self,**variables):
return self._set(**variables)
def clear(self):
self._clear()
def add_attribute(self,name,value=None):
self._add_attribute(name,value)
def add_attributes(self,*names,**attributes):
self._add_attributes(*names,**attributes)
#def transfer_from(self,other,copy=False):
# self._transfer_from(other,copy)
#def transfer_to(self,other,copy=False):
# self._transfer_to(other,copy)
def append(self,value):
self._append(value)
def save(self,fpath=None):
self._save(fpath)
def load(self,fpath):
self._load(fpath)
def list(self,*names):
if len(names)==0:
names = list(self.keys())
names.sort()
#end if
values = []
for name in names:
values.append(self[name])
#end if
return values
#end def list
def tuple(self,*names):
return tuple(self.list(*names))
#end def tuple
def obj(self,*names):
o = obj()
o.transfer_from(self,keys=names,copy=False)
return o
#end def obj
def first(self):
return self[min(self.keys())]
#end def first
def last(self):
return self[max(self.keys())]
#end def last
def open_log(self,filepath):
self.logfile = open(filepath,'w')
#end def open_log
def close_log(self):
self.logfile.close()
#end def close_log
def _write(self,s):
self.logfile.write(s)
#end def _write
def write(self,s):
self._write(s)
#end def write
def logna(self,*items):
s=''
for item in items:
s+=str(item)+' '
#end for
self.logfile.write(s)
#end def logna
def log(self,*items):
s=''
for item in items:
s+=str(item)+' '
#end for
s+='\n'
self.logfile.write(s)
#end def log
def error(self,message,header=None,exit=True,trace=True,post_header=' Error:'):
pad = 4*' '
if header==None:
header = self.__class__.__name__
#end if
self.log(header+post_header)
self.log(pad+message.replace('\n','\n'+pad))
if exit:
self.log(' exiting.\n')
if trace:
traceback.print_stack()
#end if
exit_call()
#end if
#end def error
def warn(self,message,header=None,post_header=' Warning:'):
pad = 4*' '
if header==None:
header=self.__class__.__name__
#end if
self.log(header+post_header)
self.log(pad+message.replace('\n','\n'+pad))
#end def warn
@classmethod
def class_error(cls,message,header=None,exit=True,trace=True,post_header=' Error:'):
pad = 4*' '
if header==None:
header = cls.__name__
#end if
cls.logfile.write(header+post_header+'\n')
cls.logfile.write(('\n'+message).replace('\n','\n'+pad)+'\n')
if exit:
cls.logfile.write(' exiting.\n\n')
if trace:
traceback.print_stack()
#end if
exit_call()
#end if
#end def class_error
@classmethod
def class_warn(cls,message,header=None,post_header=' Warning:'):
pad = 4*' '
if header==None:
header=cls.__name__
#end if
cls.logfile.write(header+post_header+'\n')
cls.logfile.write(('\n'+message).replace('\n','\n'+pad)+'\n')
#end def class_warn
def transfer_from(self,other,keys=None,copy=False):
if keys==None:
keys = other.keys()
#end if
if not copy:
for k in keys:
self[k]=other[k]
#end for
else:
for k in keys:
self[k]=deepcopy(other[k])
#end for
#end if
#end def transfer_from
def transfer_to(self,other,keys=None,copy=False):
if keys==None:
keys = self.keys()
#end if
if not copy:
for k in keys:
other[k]=self[k]
#end for
else:
for k in keys:
other[k]=deepcopy(self[k])
#end for
#end if
#end def transfer_to
def move_from(self,other,keys=None):
if keys==None:
keys = other.keys()
#end if
for k in keys:
self[k]=other[k]
del other[k]
#end for
#end def move_from
def move_to(self,other,keys=None):
other.move_from(self,keys)
#end def move_to
def copy_from(self,other,keys=None,deep=False):
self.transfer_from(other,keys,copy=deep)
#end def copy_from
def copy_to(self,other,keys=None,deep=False):
self.transfer_to(other,keys,copy=deep)
#end def copy_to
def delete(self,*names):
if len(names)==1:
name = names[0]
value = self[name]
del self[name]
return value
else:
if len(names)==0:
names = sorted(obj.keys(self))
#end if
values = []
for name in names:
values.append(self[name])
del self[name]
#end for
return values
#end if
#end def delete
#end class obj
import copy
import cPickle
class generic(genbase):
logfile = sys.stdout
def __init__(self,*vals,**kwargs):
if len(vals)==1 and isinstance(vals[0],(dict,generic)):
self.add_attributes(**vals[0])
self.add_attributes(**kwargs)
else:
self.add_attributes(*vals,**kwargs)
#end if
#end def __init__
def _dict(self):
return self.__dict__
#end def __get_dict
def _alt(self):
return self.__dict__
#end def __alt
def __len__(self):
return len(self._dict())
#end def __len__
def __contains__(self,name):
return name in self._dict()
#end def __contains__
def __getitem__(self,name):
return self._dict()[name]
#end def __getitem__
def __setitem__(self,name,value):
self._dict()[name] = value
#end def __setitem__
def __delitem__(self,name):
del self._dict()[name]
#end def __delitem__
def __repr__(self):
s=''
stype = type(s)
d = self._dict()
mem = list(d.keys())
mem.sort()
for m in mem:
v=d[m]
if hasattr(v,'__class__'):
s+=' {0:<20} {1:<20}\n'.format(m,v.__class__.__name__)
else:
s+=' {0:<20} {1:<20}\n'.format(m,type(v))
#end if
#end for
return s
#end def __repr__
def __iter__(self):
d = self._dict()
for item in d:
yield d[item]
#end for
#end def __iter__
def __str__(self,nindent=1):
pad = ' '
npad = nindent*pad
s=''
stype = type(s)
normal = []
qable = []
for k,v in self._dict().iteritems():
if type(k)!=stype or k[0]!='_':
if isinstance(v,(generic,obj)):
qable.append(k)
else:
normal.append(k)
#end if
#end if
#end for
normal.sort()
qable.sort()
for k in normal:
v = self[k]
indent = npad+18*' '
vstr = str(v).replace('\n','\n'+indent)
s+=npad+'{0:<15} = '.format(k)+vstr+'\n'
#end for
for k in qable:
v = self[k]
s+=npad+str(k)+'\n'
s+=v.__str__(nindent+1)
if isinstance(k,str):
s+=npad+'end '+k+'\n'
#end if
#end for
return s
#end def __str__
def copy(self):
return copy.deepcopy(self)
#end def copy
def iteritems(self):
return self._dict().iteritems()
#end def iteritems
def keys(self):
return self._dict().keys()
#end def keys
def values(self):
return self._dict().values()
#end def values
def inverse(self):
new = self.__class__()
d = dict((v,k) for k, v in self.iteritems())
new.add_attributes(**d)
return new
#end def inverse
def set(self,**variables):
for name,value in variables.iteritems():
self[name]=value
#end for
return self
#end def set
def clear(self):
self._dict().clear()
#end def clear
def add_attribute(self,name,value=None):
self[name] = value
#end def add_attribute
def add_attributes(self,*names,**attributes):
for name in names:
self[name] = None
#end for
for name,value in attributes.iteritems():
self[name]=value
#end for
#end def add_attributes
def append(self,value):
self[len(self)] = value
#end def append
def save(self,fpath=None):
if fpath==None:
fpath='./'+self.__class__.__name__+'.p'
#end if
fobj = open(fpath,'w')
binary = cPickle.HIGHEST_PROTOCOL
cPickle.dump(self,fobj,binary)
fobj.close()
del fobj
del binary
return
#end def save
def load(self,fpath=None):
if fpath==None:
fpath='./'+self.__class__.__name__+'.p'
#end if
fobj = open(fpath,'r')
tmp = cPickle.load(fobj)
fobj.close()
d = self.__dict__
d.clear()
for k,v in tmp.__dict__.iteritems():
d[k] = v
#end for
del fobj
del tmp
return
#end def load
def transfer_from(self,other,keys=None,copy=False):
if keys==None:
keys = other.keys()
#end if
if not copy:
for k in keys:
self[k]=other[k]
#end for
else:
for k in keys:
self[k]=deepcopy(other[k])
#end for
#end if
#end def transfer_from
def transfer_to(self,other,keys=None,copy=False):
if keys==None:
keys = self.keys()
#end if
if not copy:
for k in keys:
other[k]=self[k]
#end for
else:
for k in keys:
other[k]=deepcopy(self[k])
#end for
#end if
#end def transfer_to
def move_from(self,other,keys=None):
if keys==None:
keys = other.keys()
#end if
for k in keys:
self[k]=other[k]
del other[k]
#end for
#end def move_from
def move_to(self,other,keys=None):
other.move_from(self,keys)
#end def move_to
def copy_from(self,other,keys=None,deep=False):
self.transfer_from(other,keys,copy=deep)
#end def copy_from
def copy_to(self,other,keys=None,deep=False):
self.transfer_to(other,keys,copy=deep)
#end def copy_to
def delete(self,*names):
if len(names)==1:
name = names[0]
value = self[name]
del self[name]
return value
else:
if len(names)==0:
names = sorted(generic.keys(self))
#end if
values = []
for name in names:
values.append(self[name])
del self[name]
#end for
return values
#end if
#end def delete
def list(self,*names):
if len(names)==0:
names = list(generic.keys(self))
names.sort()
#end if
values = []
for name in names:
values.append(self[name])
#end if
return values
#end def list
def tuple(self,*names):
return tuple(self.list(*names))
#end def tuple
def obj(self,*names):
o = obj()
o.transfer_from(self,keys=names,copy=False)
return o
#end def obj
def first(self):
return self[min(self.keys())]
#end def first
def last(self):
return self[max(self.keys())]
#end def last
def open_log(self,filepath):
self._alt().logfile = open(filepath,'w')
#end def open_log
def close_log(self):
self._alt().logfile.close()
#end def close_log
def write(self,s):
self._alt().logfile.write(s)
#end def write
def logna(self,*items):
s=''
for item in items:
s+=str(item)+' '
#end for
self._alt().logfile.write(s)
#end def logna
def log(self,*items):
s=''
for item in items:
s+=str(item)+' '
#end for
s+='\n'
self._alt().logfile.write(s)
#end def log
def error(self,message,header=None,exit=True,trace=True,post_header=' Error:'):
pad = 4*' '
if header==None:
header = self.__class__.__name__
#end if
self.log(header+post_header)
self.log(pad+message.replace('\n','\n'+pad))
if exit:
self.log(' exiting.\n')
if trace:
traceback.print_stack()
#end if
exit_call()
#end if
#end def error
def warn(self,message,header=None,post_header=' Warning:'):
pad = 4*' '
if header==None:
header=self.__class__.__name__
#end if
self.log(header+post_header)
self.log(pad+message.replace('\n','\n'+pad))
#end def warn
@classmethod
def class_error(cls,message,header=None,exit=True,trace=True,post_header=' Error:'):
pad = 4*' '
if header==None:
header = cls.__name__
#end if
cls.logfile.write(header+post_header)
cls.logfile.write(pad+message.replace('\n','\n'+pad)+'\n')
if exit:
cls.logfile.write(' exiting.\n\n')
if trace:
traceback.print_stack()
#end if
exit_call()
#end if
#end def class_error
def _copy(self,*args,**kwargs):
return generic.copy(self,*args,**kwargs)
def _iteritems(self,*args,**kwargs):
return generic.iteritems(self,*args,**kwargs)
def _keys(self,*args,**kwargs):
return generic.keys(self,*args,**kwargs)
def _values(self,*args,**kwargs):
return generic.values(self,*args,**kwargs)
def _inverse(self,*args,**kwargs):
return generic.inverse(self,*args,**kwargs)
def _set(self,*args,**kwargs):
generic.set(self,*args,**kwargs)
def _clear(self,*args,**kwargs):
generic.clear(self,*args,**kwargs)
def _add_attribute(self,*args,**kwargs):
generic.add_attribute(self,*args,**kwargs)
def _add_attributes(self,*args,**kwargs):
generic.add_attributes(self,*args,**kwargs)
def _append(self,*args,**kwargs):
generic.append(self,*args,**kwargs)
def _save(self,*args,**kwargs):
generic.save(self,*args,**kwargs)
def _load(self,*args,**kwargs):
generic.load(self,*args,**kwargs)
def _transfer_from(self,*args,**kwargs):
generic.transfer_from(self,*args,**kwargs)
def _transfer_to(self,*args,**kwargs):
generic.transfer_to(self,*args,**kwargs)
def _move_from(self,*args,**kwargs):
generic.move_from(self,*args,**kwargs)
def _move_to(self,*args,**kwargs):
generic.move_to(self,*args,**kwargs)
def _copy_from(self,*args,**kwargs):
generic.copy_from(self,*args,**kwargs)
def _copy_to(self,*args,**kwargs):
generic.copy_to(self,*args,**kwargs)
def _delete(self,*args,**kwargs):
generic.delete(self,*args,**kwargs)
def _list(self,*args,**kwargs):
return generic.list(self,*args,**kwargs)
def _tuple(self,*args,**kwargs):
return generic.tuple(self,*args,**kwargs)
def _obj(self,*args,**kwargs):
return generic.obj(self,*args,**kwargs)
def _first(self,*args,**kwargs):
return generic.first(self,*args,**kwargs)
def _last(self,*args,**kwargs):
return generic.last(self,*args,**kwargs)
def _open_log(self,*args,**kwargs):
generic.open_log(self,*args,**kwargs)
def _close_log(self,*args,**kwargs):
generic.close_log(self,*args,**kwargs)
def _write(self,*args,**kwargs):
generic.write(self,*args,**kwargs)
def _logna(self,*args,**kwargs):
generic.logna(self,*args,**kwargs)
def _log(self,*args,**kwargs):
generic.log(self,*args,**kwargs)
def _error(self,*args,**kwargs):
generic.error(self,*args,**kwargs)
def _warn(self,*args,**kwargs):
generic.warn(self,*args,**kwargs)
#end class generic
class hidden(generic):
def __init__(self,*vals,**kwargs):
d = object.__getattribute__(self,'__dict__')
d['_hidden_'] = generic()
d['_public_'] = generic()
d = self._dict()
generic.__init__(self,*vals,**kwargs)
#end def __init__
def _dict(self):
return self.__dict__['_public_']
#end def __get_dict
def _alt(self):
return self.__dict__['_hidden_']
#end def __alt
def __getattribute__(self,name):
d = object.__getattribute__(self,'__dict__')
if '_public_' in d:
p = d['_public_']
if name in p:
return p[name]
else:
return object.__getattribute__(self,name)
#end if
else:
return object.__getattribute__(self,name)
#end if
#end def __getattribute__
def __setattr__(self,name,value):
self._dict()[name] = value
#end def __setattr__
def __delattr__(self,name):
del self._dict()[name]
#end def __delattr__
def hidden(self):
return self.__dict__['_hidden_']
#end def hidden
def public(self):
return self.__dict__['_public_']
#end def public
def _hidden(self):
return hidden.hidden(self)
#end def _hidden
def _public(self):
return hidden.public(self)
#end def _public
#end class hidden
```
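`obj` and `generic` above are attribute-style containers: item access, attribute access, and the various transfer/copy helpers all operate on the same underlying `__dict__`. The stripped-down analogue below (not the nexus implementation, which also inherits from `AllAbilities`/`genbase`) shows the access pattern the rest of the library relies on.
```python
# Stripped-down analogue of the nexus obj/generic container: attribute and
# item access share the same underlying dict, and transfer_from copies
# selected keys from another container.
from copy import deepcopy

class MiniObj:
    def __getitem__(self, name):
        return self.__dict__[name]

    def __setitem__(self, name, value):
        self.__dict__[name] = value

    def __contains__(self, name):
        return name in self.__dict__

    def keys(self):
        return self.__dict__.keys()

    def set(self, **variables):
        for name, value in variables.items():
            self[name] = value
        return self

    def transfer_from(self, other, keys=None, copy=False):
        keys = list(other.keys()) if keys is None else keys
        for k in keys:
            self[k] = deepcopy(other[k]) if copy else other[k]

a = MiniObj().set(nodes=4, queue='batch')
b = MiniObj()
b.transfer_from(a)
print(b.nodes, b['queue'])  # 4 batch -- attribute and item access are equivalent
```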
#### File: nexus/library/project_base.py
```python
import os
import traceback
import gc as garbage_collector
from memory import resident
from generic import obj
from developer import DevBase
modes = obj(
none = 0,
setup = 1,
send_files = 2,
submit = 3,
get_output = 4,
analyze = 5,
stages = 6,
all = 7
)
class Pobj(DevBase):
gc = garbage_collector
gc.enable()
mode = modes.stages
generate_only = False
status_only = False
monitor = True
skip_submission = False
emulate = False #override application and use application_emulator
verbose = True
debug = False
trace = False
load_images = True
sleep = 3
local_directory = './'
remote_directory = local_directory
file_locations = [local_directory]
runs = 'runs'
results = 'results'
#pseudo_dir = os.path.join(local_directory,'pseudopotentials')
pseudo_dir = None
pseudopotentials = None
modes = modes
primary_modes = ['setup','send_files','submit','get_output','analyze']
dependent_modes = set(['submit'])
stages = []
stages_set = set(stages)
wrote_something = False # for pretty printing
@staticmethod
def set_mode(mode):
if mode in Pobj.modes:
Pobj.mode = Pobj.modes[mode]
else:
print 'settings Error: invalid mode specified: '+mode+'\n valid modes are '+str(Pobj.modes.keys())
#end if
#end def set_mode
def mem_usage(self):
return int(resident()/1e6)
#end def mem_usage
indent = ' '
def log(self,*texts,**kwargs):
if self.verbose:
if len(kwargs)>0:
n = kwargs['n']
else:
n=0
#end if
text=''
for t in texts:
text+=str(t)+' '
#end for
pad = n*self.indent
self.logfile.write(pad+text.replace('\n','\n'+pad)+'\n')
#end if
Pobj.wrote_something = True
#end def log
#@classmethod
#def class_error(cls,msg,source=None,n=0,trace=True):
# if source==None:
# source = cls.__name__
# #end if
# pad = n*cls.indent
# text=pad+source+' Error: '+msg
# text = '\n'+text.replace('\n','\n'+pad)+'\n\n'
# cls.logfile.write(text)
# if trace:
# traceback.print_stack()
# #end if
# exit()
##end def class_error
#
#@classmethod
#def class_warn(cls,msg,source=None,n=0):
# if source==None:
# source = cls.__name__
# #end if
# pad = n*cls.indent
# text=pad+source+' Warning: '+msg
# cls.logfile.write(text.replace('\n','\n'+pad)+'\n')
##end def class_warn
def dlog(self,*texts,**kwargs):
if self.debug:
#self.log('mem_usage',self.mem_usage(),n=5)
self.log(*texts,**kwargs)
#end if
#end def dlog
def tlog(self,*texts,**kwargs):
if self.trace:
self.log(*texts,**kwargs)
w,s,j,f,g,a=int(self.setup),int(self.submitted),int(self.job.finished),int(self.finished),int(self.got_output),int(self.analyzed)
self.log('w,s,j,f,g,a',w,s,j,f,g,a,n=kwargs['n']+1)
#self.log('dependencies',self.dependencies.keys(),n=kwargs['n']+1)
#self.log('dependents ',self.dependents.keys(),n=kwargs['n']+1)
#end if
#end def tlog
working_directory = None
def enter(self,directory,changedir=True,msg=''):
self.working_directory = os.getcwd()
self.log(' Entering '+directory,msg)
if changedir:
os.chdir(directory)
#end if
pad = ' '
return pad
#end def enter
def leave(self):
os.chdir(self.working_directory)
#end def leave
#end class Pobj
```
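The `enter`/`leave` pair in `Pobj` manually pushes and pops the working directory around a block of work. A standalone equivalent using `contextlib` (not part of nexus) is sketched below; unlike `enter`/`leave`, it also restores the previous directory when the body raises.
```python
import os
from contextlib import contextmanager

# Standalone equivalent of the enter()/leave() pair above: change into a
# directory, run the body, then restore the previous working directory even
# if the body raises an exception.
@contextmanager
def working_directory(directory):
    previous = os.getcwd()
    os.chdir(directory)
    try:
        yield directory
    finally:
        os.chdir(previous)

# Usage sketch (assumes a Unix-like system where /tmp exists)
with working_directory('/tmp'):
    print(os.getcwd())   # /tmp
print(os.getcwd())       # back where we started
```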
#### File: nexus/library/qmcpack_analyzer_base.py
```python
from numpy import minimum,resize
from generic import obj
from hdfreader import HDFgroup
from qaobject import QAobject
from debug import *
class QAinformation(obj):
None
#end class QAinformation
class QAdata(QAobject):
def zero(self):
for value in self:
value[:] = 0
#end for
#self.sum()
#end def zero
def minsize(self,other):
for name,value in self.iteritems():
if name in other:
self[name] = resize(value,minimum(value.shape,other[name].shape))
else:
self.error(name+' not found in minsize partner')
#end if
#end for
#self.sum()
#end def minsize
def accumulate(self,other):
for name,value in self.iteritems():
if name in other:
value += other[name][0:len(value)]
else:
self.error(name+' not found in accumulate partner')
#end if
#end for
#self.sum()
#end def accumulate
def normalize(self,normalization):
for value in self:
value/=normalization
#end for
#self.sum()
#end def normalize
def sum(self):
s = 0
for value in self:
s+=value.sum()
#end for
print ' sum = {0}'.format(s)
#end def sum
#end class QAdata
class QAHDFdata(QAdata):
def zero(self):
for name,value in self.iteritems():
if isinstance(value,HDFgroup):
value.zero('value','value_squared')
#end if
#end for
#end def zero
def minsize(self,other):
for name,value in self.iteritems():
if isinstance(value,HDFgroup):
if name in other and isinstance(other[name],HDFgroup):
value.minsize(other[name],'value','value_squared')
else:
self.error(name+' not found in minsize partner')
#end if
#end if
#end for
#end def minsize
def accumulate(self,other):
for name,value in self.iteritems():
if isinstance(value,HDFgroup):
if name in other and isinstance(other[name],HDFgroup):
value.accumulate(other[name],'value','value_squared')
else:
self.error(name+' not found in accumulate partner')
#end if
#end if
#end for
#end def accumulate
def normalize(self,normalization):
for value in self:
if isinstance(value,HDFgroup):
value.normalize(normalization,'value','value_squared')
#end if
#end for
#end def normalize
#end class QAHDFdata
class QAanalyzer(QAobject):
verbose_vlog = False
capabilities = None
request = None
run_info = None
method_info = None
opt_methods = set(['opt','linear','cslinear'])
vmc_methods = set(['vmc'])
dmc_methods = set(['dmc'])
allowed_settings = []
@classmethod
def settings(cls,**kwargs):
invalid = list(set(kwargs.keys())-set(cls.allowed_settings))
if len(invalid)>0:
cls.class_error('invalid variable names encountered in settings\n invalid names: {0}\n valid options are: {1}'.format(invalid,cls.allowed_settings))
#end if
for name,value in kwargs.iteritems():
cls.__dict__[name] = value
#end for
#end def settings
def __init__(self,nindent=0):
self.info = QAinformation(
initialized = False,
data_loaded = False,
analyzed = False,
nindent = nindent
)
self.vlog('building '+self.__class__.__name__)
#end def __init__
def subindent(self):
return self.info.nindent+1
#end def indent
def vlog(self,msg,n=0):
if QAanalyzer.verbose_vlog:
self.log(msg,n=self.info.nindent+n)
#end if
#end def vlog
def reset_indicators(self,initialized=None,data_loaded=None,analyzed=None):
if initialized!=None:
self.info.initialized = initialized
#end if
if data_loaded!=None:
self.info.data_loaded = data_loaded
#end if
if analyzed!=None:
self.info.analyzed = analyzed
#end if
#end def reset_indicators
def init_sub_analyzers(self):
self.not_implemented()
#end def init_sub_analyzers
def load_data_local(self):
None
#end def load_data_local
def remove_data_local(self):
if 'data' in self:
del self.data
#end if
#end def remove_data_local
def analyze_local(self):
None
#end def analyze_local
def set_global_info(self):
None
#end def set_global_info
def unset_global_info(self):
None
#end def unset_global_info
def traverse(self,func_name,block_name=None,callpost=True,**kwargs):
if not callpost:
getattr(self,func_name)(**kwargs)
#end if
if block_name is None or not self.info[block_name]:
for name,value in self.iteritems():
if isinstance(value,QAanalyzer):
value.traverse(func_name,block_name,callpost,**kwargs)
elif isinstance(value,QAanalyzerCollection):
for n,v in value.iteritems():
if isinstance(v,QAanalyzer):
v.traverse(func_name,block_name,callpost,**kwargs)
#end if
#end for
#end if
#end for
#end if
if block_name!=None:
self.info[block_name] = True
#end if
if callpost:
getattr(self,func_name)(**kwargs)
#end if
#end def traverse
def propagate_indicators(self,**kwargs):
self.reset_indicators(**kwargs)
for name,value in self.iteritems():
if isinstance(value,QAanalyzer):
value.propagate_indicators(**kwargs)
elif isinstance(value,QAanalyzerCollection):
for n,v in value.iteritems():
if isinstance(v,QAanalyzer):
v.propagate_indicators(**kwargs)
#end if
#end for
#end if
#end for
#end def propagate_indicators
def load_data(self):
if not self.info.data_loaded:
self.vlog('loading '+self.__class__.__name__+' data',n=1)
self.load_data_local()
self.info.data_loaded = True
#end if
for name,value in self.iteritems():
if isinstance(value,QAanalyzer):
value.load_data()
elif isinstance(value,QAanalyzerCollection):
for n,v in value.iteritems():
if isinstance(v,QAanalyzer):
v.load_data()
#end if
#end for
#end if
#end for
#end def load_data
def analyze(self,force=False):
self.set_global_info()
if not self.info.data_loaded:
self.load_data_local()
self.info.data_loaded = True
#end if
for name,value in self.iteritems():
if isinstance(value,QAanalyzer):
value.analyze(force)
elif isinstance(value,QAanalyzerCollection):
for n,v in value.iteritems():
if isinstance(v,QAanalyzer):
v.analyze(force)
#end if
#end for
#end if
#end for
if not self.info.analyzed or force:
self.vlog('analyzing {0} data'.format(self.__class__.__name__),n=1)
self.analyze_local()
self.info.analyzed = True
#end if
self.unset_global_info()
#end def analyze
def remove_data(self):
self.vlog('removing '+self.__class__.__name__+' data',n=1)
names = list(self.keys())
for name in names:
if isinstance(self[name],QAdata):
del self[name]
#end if
#end for
for name,value in self.iteritems():
if isinstance(value,QAanalyzer):
value.remove_data()
elif isinstance(value,QAanalyzerCollection):
for n,v in value.iteritems():
if isinstance(v,QAanalyzer):
v.remove_data()
#end if
#end for
#end if
#end for
#end def remove_data
def zero_data(self):
self.vlog('zeroing '+self.__class__.__name__+' data',n=1)
for value in self:
if isinstance(value,QAdata):
value.zero()
#end if
#end for
for name,value in self.iteritems():
if isinstance(value,QAanalyzer):
value.zero_data()
elif isinstance(value,QAanalyzerCollection):
for n,v in value.iteritems():
if isinstance(v,QAanalyzer):
v.zero_data()
#end if
#end for
#end if
#end for
#end def zero_data
def minsize_data(self,other):
self.vlog('minsizing '+self.__class__.__name__+' data',n=1)
for name,value in self.iteritems():
if isinstance(value,QAdata):
if name in other and isinstance(other[name],value.__class__):
value.minsize(other[name])
else:
self.error('data '+name+' not found in minsize_data partner')
#end if
#end if
#end for
for name,value in self.iteritems():
if isinstance(value,QAanalyzer):
if name in other and isinstance(other[name],value.__class__):
ovalue = other[name]
else:
self.error('analyzer '+name+' not found in minsize_data partner')
#end if
value.minsize_data(ovalue)
elif isinstance(value,QAanalyzerCollection):
if name in other and isinstance(other[name],QAanalyzerCollection):
ovalue = other[name]
else:
self.error('collection '+name+' not found in minsize_data partner')
#end if
for n,v in value.iteritems():
if isinstance(v,QAanalyzer):
if n in ovalue and isinstance(ovalue[n],v.__class__):
ov = ovalue[n]
else:
self.error('analyzer '+n+' not found in minsize_data partner collection '+name)
#end if
v.minsize_data(ov)
#end if
#end for
#end if
#end for
#end def minsize_data
def accumulate_data(self,other):
self.vlog('accumulating '+self.__class__.__name__+' data',n=1)
for name,value in self.iteritems():
if isinstance(value,QAdata):
if name in other and isinstance(other[name],value.__class__):
value.accumulate(other[name])
else:
self.error('data '+name+' not found in accumulate_data partner')
#end if
#end if
#end for
for name,value in self.iteritems():
if isinstance(value,QAanalyzer):
if name in other and isinstance(other[name],value.__class__):
ovalue = other[name]
else:
self.error('analyzer '+name+' not found in accumulate_data partner')
#end if
value.accumulate_data(ovalue)
elif isinstance(value,QAanalyzerCollection):
if name in other and isinstance(other[name],QAanalyzerCollection):
ovalue = other[name]
else:
self.error('collection '+name+' not found in accumulate_data partner')
#end if
for n,v in value.iteritems():
if isinstance(v,QAanalyzer):
if n in ovalue and isinstance(ovalue[n],v.__class__):
ov = ovalue[n]
else:
self.error('analyzer '+n+' not found in accumulate_data partner collection '+name)
#end if
v.accumulate_data(ov)
#end if
#end for
#end if
#end for
#end def accumulate_data
def normalize_data(self,normalization):
self.vlog('normalizing '+self.__class__.__name__+' data',n=1)
for value in self:
if isinstance(value,QAdata):
value.normalize(normalization)
#end if
#end for
for name,value in self.iteritems():
if isinstance(value,QAanalyzer):
value.normalize_data(normalization)
elif isinstance(value,QAanalyzerCollection):
for n,v in value.iteritems():
if isinstance(v,QAanalyzer):
v.normalize_data(normalization)
#end if
#end for
#end if
#end for
#end def normalize_data
#end class QAanalyzer
class QAanalyzerCollection(QAobject):
None
#end class QAanalyzerCollection
```
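Most `QAanalyzer` methods above (`load_data`, `analyze`, `propagate_indicators`, `remove_data`, ...) share one shape: recurse into any nested `QAanalyzer` or `QAanalyzerCollection` members and perform the local work once, guarded by an indicator flag. The compact sketch below illustrates that shape with plain classes in place of the QA hierarchy.
```python
# Compact sketch of the recursive visit pattern used by QAanalyzer methods:
# descend into nested analyzers, then do the local work once unless forced.
class Analyzer:
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)
        self.analyzed = False

    def analyze(self, force=False):
        for child in self.children:      # descend first, as analyze() does
            child.analyze(force)
        if not self.analyzed or force:   # then perform the local analysis once
            print('analyzing', self.name)
            self.analyzed = True

root = Analyzer('run', [Analyzer('scalars'), Analyzer('dmc', [Analyzer('walkers')])])
root.analyze()
root.analyze()            # second call is a no-op unless force=True
root.analyze(force=True)
```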
#### File: nexus/library/vasp_analyzer.py
```python
import os
from numpy import array,zeros
from generic import obj
from simulation import Simulation,SimulationAnalyzer
from vasp_input import VaspInput
from developer import DevBase
from debug import *
# vasp xml reader classes/functions
class VXML(DevBase):
basic_types = set('i v dimension field set time'.split())
data_types = obj(int=int,string=str,float=float)
def __init__(self,tag,attr=None):
self._tag = tag
self._lines = []
self._attr = None
self._value = None
if attr!=None and len(attr)>0:
self._attr = obj()
tokens = attr.split('"')
next=0
for token in tokens:
next+=1
if len(token)>0 and token[-1]=='=':
name = token.strip()[:-1]
self._attr[name]=tokens[next].replace(' ','_').replace('-','_').lower()
#end if
#end for
#end if
#end def __init__
def _is_empty(self):
return len(self)-4==0
#end def _is_empty
def _add(self,new):
tag = new._tag
if not tag in self:
self[tag] = new
else:
cur = self[tag]
if 0 in cur:
cur._append(new)
else:
coll = VXMLcoll(tag)
coll._append(cur)
coll._append(new)
self[tag] = coll
#end if
#end if
#end def _add
def _parse(self):
# rename sub objects if name is present
for name in list(self.keys()):
value = self[name]
if isinstance(value,VXML):
if value._attr!=None and 'name' in value._attr:
del self[name]
self[value._attr.name] = value
del value._attr.name
elif isinstance(value,VXMLcoll):
for n in list(value.keys()):
v = value[n]
if isinstance(v,VXML):
if v._attr!=None and 'name' in v._attr:
del value[n]
self[v._attr.name] = v
del v._attr.name
#end if
#end if
#end for
if len(value)==0:
del self[name]
else:
value._reorder()
#end if
#end if
#end if
#end for
# have all sub objects parse (read fields, set _value)
for v in self:
if isinstance(v,VXML):
v._parse()
#end if
#end for
lines = self._lines
if len(lines)>0:
#fail = False
#try:
if self._tag=='array':
self._parse_array(lines)
else:
self._parse_values(lines)
#end if
#except Exception,e:
# print '_parse failed!'
# print e
# print self
# print tag
# print attr
# print lines[0:min(10,len(lines))]
# print
# print
# fail = True
##end try
#if fail:
# self.error('parse failed please see information above','read_vxml')
##end if
#end if
# if sub-objects resolve to a value, replace with that value
for name in list(self.keys()):
value = self[name]
if isinstance(value,VXML) and value._value!=None:
self[name] = value._value
#end if
#end for
# assign attributes
if self._attr!=None:
if 'type' in self._attr:
del self._attr.type
#end if
self.transfer_from(self._attr)
#end if
return
#end def _parse
def _parse_values(self,lines):
if len(lines)==1 and not '<' in lines[0]:
self._value = readval(lines[0])
else:
arr = None
for line in lines:
start = line.startswith('<') and not line.startswith('</')
end = line.endswith('>')
if start:
fline = line
else:
fline += line
#end if
if end:
tokens = fline.replace('<','|').replace('>','|').split('|')
tn = tokens[1]
val = readval(tokens[2].strip())
if 'name=' in tn:
name = tn.split('name="',1)[1].split('"')[0].lower()
self[name] = val
elif arr is None:
arr = [val]
else:
arr.append(val)
#end if
#end if
#end for
if arr!=None:
self._value = array(arr)
#end if
#end if
#end def parse_values
def _parse_array(self,lines):
#print 'parsing array'
dims = obj()
fields = obj()
dim_counts = None
field_list = []
level = -1
set_dims = False
for line in lines:
if line.startswith('<'):
if line.startswith('<dimension'):
tokens = line.replace('<','|').replace('>','|').split('|')
tn = tokens[1]
dname = tokens[2].lower().replace(' ','_').replace('-','_')
if 'dim=' in tn:
d = int(tn.split('dim="',1)[1].split('"')[0])
dims[d] = dname
else:
dims.append(dname)
#end if
elif line.startswith('<field'):
tokens = line.replace('<','|').replace('>','|').split('|')
tn = tokens[1]
fname = tokens[2].lower().replace(' ','_').replace('-','_')
if 'type=' in tn:
t = tn.split('type="',1)[1].split('"')[0]
if t in VXML.data_types:
dtype = VXML.data_types[t]
else:
self.error('field type {0} is unrecognized: {1}'.format(t,line))
#end if
else:
dtype = float
#end if
fields.append(obj(name=fname,dtype=dtype))
elif line.startswith('<set'):
if not set_dims:
dims = dims.list()
dims.reverse()
dims = tuple(dims)
dim_counts = zeros((len(dims),),dtype=int)
set_dims = True
#end if
level += 1
dim_counts[level]=0
elif line.startswith('</set'):
level -= 1
if level!=-1:
dim_counts[level]+=1
#end if
else:
self.error('array parsing failed\n unrecognized xml encountered: {0}'.format(line),'read_vxml')
#end if
else:
dim_counts[level]+=1
field_list.append(line.split())
#end if
#end for
self.dims = dims
for findex,field in fields.iteritems():
lst = []
for field_vals in field_list:
lst.append(field_vals[findex])
#end for
arr = array(lst,dtype=field.dtype).ravel()
arr.shape = tuple(dim_counts)
self[field.name] = arr
#end for
#print ' done'
#end def _parse_array
def _remove_empty(self):
for n in list(self.keys()):
v = self[n]
if isinstance(v,VXML):
v._remove_empty()
if isinstance(v,VXMLcoll) and v._is_empty():
del self[n]
#end if
#end if
#end for
#end def _remove_empty
def _remove_hidden(self):
del self._tag
del self._attr
del self._lines
del self._value
for v in self:
if isinstance(v,VXML):
v._remove_hidden()
#end if
#end for
#end def _remove_hidden()
#end class VXML
class VXMLcoll(VXML):
def _append(self,new):
index = len(self)-4
self[index]=new
#end def _append
def _reorder(self):
n=0
for key in sorted(self.keys()):
value = self[key]
if isinstance(value,VXML):
del self[key]
self[n]=value
n+=1
#end if
#end for
#end def _reorder
#end class VXMLcoll
booldict = dict(T=True,F=False)
def readval(val):
fail = False
split = False
if isinstance(val,str):
split = ' ' in val
#end if
if isinstance(val,list) or split:
if split:
val = val.split()
#end if
try:
v = array(val,dtype=int)
except:
try:
v = array(val,dtype=float)
except:
try:
v = array(val,dtype=str)
except:
fail = True
#end try
#end try
#end try
elif val in booldict:
v = booldict[val]
else:
try:
v = int(val)
except:
try:
v = float(val)
except:
v = val
#end try
#end try
#end if
if fail:
VXML.class_error('failed to read value: "{0}"'.format(val),'read_vxml')
#end if
return v
#end def readval
def read_vxml(filepath):
if not os.path.exists(filepath):
VXML.class_error('file {0} does not exist'.format(filepath),'read_vxml')
#end if
#print 'read'
contents = open(filepath,'r').read()
#print 'replace'
contents = contents.replace('<rc>',' ').replace('</rc>',' ')
contents = contents.replace('<r>' ,' ').replace('</r>' ,' ')
contents = contents.replace('<c>' ,' ').replace('</c>' ,' ')
#print 'split lines'
lines = contents.splitlines()
#print 'process lines'
root = VXML('vasprun')
stack = [root]
cur = stack[0]
for line in lines:
ls = line.strip()
if ls.startswith('</'):
tag = ls[2:-1]
if tag==cur._tag:
stack.pop()
cur = stack[-1]
#print len(stack)*' '+'end '+tag
else:
cur._lines.append(ls)
#end if
elif ls.startswith('<?'):
None
elif ls.startswith('<'):
ta,rest = ls[1:].split('>',1)
tokens = ta.split(' ',1)
tag = tokens[0]
if not tag in VXML.basic_types:
if len(tokens)==1:
attr = None
else:
attr = tokens[1].strip()
#end if
if ls.endswith('</{0}>'.format(tag)):
new = VXML(tag,attr)
new._lines.append(ls.replace('<','|').replace('>','|').split('|')[2])
cur._add(new)
#print len(stack)*' '+'end '+tag
else:
#print len(stack)*' '+tag
new = VXML(tag,attr)
cur._add(new)
cur = new
stack.append(cur)
#end if
else:
cur._lines.append(ls)
#end if
else:
cur._lines.append(ls)
#end if
#end for
if len(stack)!=1:
VXML.class_error('read failed\nxml tree did not seem to close')
#end if
#print 'parse'
root._parse()
root._remove_empty()
root._remove_hidden()
#print 'done'
return root
#end def read_vxml
# vasp outcar functions
class VaspLines(DevBase):
def __init__(self,lines):
self.pointer = 0
self.lines = lines
#end def __init__
def advance_line(self,amount):
self.pointer += amount
return self.lines[self.pointer]
#end def advance_line
def advance_token(self,token):
psave = self.pointer
for line in self.lines[self.pointer:]:
if token in line:
return line
#end if
self.pointer += 1
#end while
self.pointer = psave
return None
#end def advance
def advance(self,amount):
self.pointer += amount
#end def advance
def remainder(self):
return self.lines[self.pointer:]
#end def remainder
def rewind(self,point=0):
self.pointer = point
#end def rewind
def get_line(self,point=None):
if point is None:
point = self.pointer
#end if
return self.lines[point]
#end def get_line
def get_line_ahead(self,nahead):
return self.lines[self.pointer+nahead]
#end def get_line_ahead
#end class VaspLines
def read_outcar_header_values(vlines,odata):
line = vlines.advance_token('TOTEN')
odata.total_energy = float(line.split()[4])
vlines.advance_token('energy without entropy')
#end def read_outcar_header_values
def read_outcar_core_potentials(vlines,odata):
line = vlines.advance_token('the test charge radii are')
odata.core_potential_radii = array(line.split()[5:],dtype=float)
vlines.advance(2)
n = 0
cpots = []
for line in vlines.remainder():
ls = line.strip()
n+=1
if len(ls)==0:
break
#end if
tokens = line.replace('-',' -').split()
cpots.extend(tokens[1::2])
#end for
odata.core_potentials = array(cpots,dtype=float)
vlines.advance(n)
#end def read_outcar_core_potentials
def read_outcar_fermi_energy(vlines,odata):
line = vlines.advance_token('E-fermi')
odata.Efermi = float(line.split()[2])
#end def read_outcar_fermi_energy
def read_outcar_bands(vlines,odata):
bands = obj()
line = vlines.advance_token('spin component')
if line!=None:
last_empty = True
n = 0
for line in vlines.remainder():
if len(line)>2:
if line[1]=='s':
ns = int(line.split()[2])
spin = obj()
bands[ns] = spin
elif line[1]=='k':
tokens = line.split()
nk = int(tokens[1])
kp = array(tokens[3:],dtype=float)
kpoint = obj(kpoint=kp,energies=[],occupations=[])
spin[nk]=kpoint
elif line[2]=='b':
None
else:
bnum,energy,occ = line.split()
kpoint.energies.append(float(energy))
kpoint.occupations.append(float(occ))
#end if
last_empty = False
else:
if last_empty:
break
#end if
last_empty = True
#end if
n+=1
#end for
vlines.advance(n)
#end if
for ns,spin in bands.iteritems():
for nk,kpoint in spin.iteritems():
kpoint.energies = array(kpoint.energies,dtype=float)
kpoint.occupations = array(kpoint.occupations,dtype=float)
#end for
#end for
odata.bands = bands
#end def read_outcar_bands
def read_outcar_charge_mag(vlines,odata,token):
ion = obj(s=[],p=[],d=[],tot=[])
total = obj()
vlines.advance_token(token)
vlines.advance(4)
prev_end = False
n=0
for line in vlines.remainder():
n+=1
if prev_end:
break
#end if
if line[0]=='-':
prev_end = True
else:
vals = array(line.split()[1:],dtype=float)
ion.s.append(vals[0])
ion.p.append(vals[1])
ion.d.append(vals[2])
ion.tot.append(vals[3])
#end if
#end for
for channel,vals in ion.iteritems():
ion[channel] = array(vals,dtype=float)
#end for
vlines.advance(n)
vals = array(line.split()[1:],dtype=float)
total.s = vals[0]
total.p = vals[1]
total.d = vals[2]
total.tot = vals[3]
return ion,total
#end def read_outcar_charge_mag
def read_outcar_total_charge(vlines,odata):
ion,total = read_outcar_charge_mag(vlines,odata,'total charge ') # trailing space is important
odata.ion_charge = ion
odata.total_charge = total
#end def read_outcar_total_charge
def read_outcar_magnetization(vlines,odata):
ion,total = read_outcar_charge_mag(vlines,odata,'magnetization')
odata.ion_magnetization = ion
odata.total_magnetization = total
#end def read_outcar_magnetization
def read_outcar_stress(vlines,odata):
vlines.advance_token('FORCE on cell')
line = vlines.advance_line(1)
dirs = line.split()[1:]
st = array(vlines.advance_token('Total').split()[1:],dtype=float)
st_kb = array(vlines.advance_line(1).split()[2:],dtype=float)
pressure = float(vlines.advance_line(1).split()[3])
stress = obj()
stress_kb = obj()
for i in range(len(dirs)):
d = dirs[i].lower()
stress[d] = st[i]
stress_kb[d] = st_kb[i]
#end for
odata.stress = stress
odata.stress_kb = stress_kb
odata.pressure = pressure
#end def read_outcar_stress
def read_outcar_cell(vlines,odata):
vlines.advance_token('VOLUME and BASIS')
volume = float(vlines.advance_line(3).split()[-1])
a1 = vlines.advance_line(2).split()[0:3]
a2 = vlines.advance_line(1).split()[0:3]
a3 = vlines.advance_line(1).split()[0:3]
lattice_vectors = array([a1,a2,a3],dtype=float)
odata.volume = volume
odata.lattice_vectors = lattice_vectors
#end def read_outcar_cell
def read_outcar_position_force(vlines,odata):
position = []
force = []
vlines.advance_token('POSITION')
vlines.advance(2)
prev_end = False
for line in vlines.remainder():
if prev_end:
break
#end if
if line[1]=='-':
prev_end = True
else:
tokens = line.split()
position.append(tokens[0:3])
force.append(tokens[3:6])
#end if
#end for
total_drift = line.split()[2:5]
odata.position = array(position,dtype=float)
odata.force = array(force,dtype=float)
odata.total_drift = array(total_drift,dtype=float)
#end def read_outcar_position_force
def read_outcar_accounting(vlines,odata):
time = obj()
memory = obj()
vlines.advance_token('General timing and accounting')
vlines.advance(2)
time.cpu = float(vlines.advance_line(1).split()[-1])
time.user = float(vlines.advance_line(1).split()[-1])
time.system = float(vlines.advance_line(1).split()[-1])
time.elapsed = float(vlines.advance_line(1).split()[-1])
vlines.advance(1)
memory.maximum = float(vlines.advance_line(1).split()[-1])
memory.average = float(vlines.advance_line(1).split()[-1])
odata.time = time
odata.memory = memory
#end def read_outcar_accounting
class OutcarData(DevBase):
any_functions = [
('header_values' , read_outcar_header_values ),
]
elast_functions = [
('core_potentials' , read_outcar_core_potentials),
('fermi_energy' , read_outcar_fermi_energy ),
('bands' , read_outcar_bands ),
('total_charge' , read_outcar_total_charge ),
('magnetization' , read_outcar_magnetization ),
('stress' , read_outcar_stress ),
('cell' , read_outcar_cell ),
('position_force' , read_outcar_position_force ),
]
ilast_functions = [
('accounting' , read_outcar_accounting ),
]
read_outcar_functions = any_functions + elast_functions + ilast_functions
def __init__(self,filepath=None,lines=None):
if filepath!=None:
if not os.path.exists(filepath):
self.error('file {0} does not exist'.format(filepath))
#end if
f = open(filepath,'r')
lines = f.read().splitlines()
f.close()
#end if
self.vlines = VaspLines(lines)
#end def __init__
def read(self,ilast=False,elast=False,all=True):
ilast |= all
elast |= all
vlines = self.vlines
del self.vlines
read_functions = []
read_functions.extend(self.any_functions)
if elast:
read_functions.extend(self.elast_functions)
if ilast:
read_functions.extend(self.ilast_functions)
#end if
#end if
for quantity,read_function in read_functions:
try:
read_function(vlines,self)
except:
None
#end try
#end for
#end def read
#end class OutcarData
# main analyzer class
class VaspAnalyzer(SimulationAnalyzer):
def __init__(self,arg0=None,xml=False,analyze=False):
path = None
prefix = None
incar = None
outcar = None
xmlfile = None
if isinstance(arg0,Simulation):
sim = arg0
file = sim.infile
path = sim.locdir
elif arg0!=None:
path,file = os.path.split(arg0)
else:
file = ''
xml = False
#end if
if len(file)>0:
if file.endswith('INCAR'):
incar = file
prefix = file.replace('INCAR','').strip()
elif file.endswith('OUTCAR'):
prefix = file.replace('OUTCAR','').strip()
else:
self.error('please provide the path to an INCAR or OUTCAR file')
#end if
outcar = prefix+'OUTCAR'
xmlfile = prefix+'vasprun.xml'
if prefix=='':
prefix=None
#end if
#end if
self.info = obj(
path = path,
prefix = prefix,
incar = incar,
outcar = outcar,
xmlfile = xmlfile,
xml = xml
)
if analyze:
self.analyze()
#end if
#end def __init__
def analyze(self,outcar=None):
if outcar is None and self.info.outcar!=None:
outcar = os.path.join(self.info.path,self.info.outcar)
#end if
if self.info.xml:
self.xmldata = read_vxml(os.path.join(self.info.path,self.info.xmlfile))
#end if
if outcar!=None:
self.analyze_outcar(outcar)
#end if
#end def analyze
def analyze_outcar(self,outcar):
if not os.path.exists(outcar):
self.error('outcar file {0} does not exist'.format(outcar))
#end if
oc = open(outcar,'r')
lines = oc.read().splitlines()
oc.close()
del oc
# gather initialization lines
init = []
n = 0
for line in lines:
if len(line)>0 and line[0]=='-' and 'Iteration' in line:
break
#end if
init.append(line)
n+=1
#end for
# gather lines for each iteration
ion_steps = obj()
for line in lines[n:]:
if len(line)>0 and line[0]=='-' and 'Iteration' in line:
iteration = []
inum,enum = line.strip(' -Iteration)').split('(')
inum = int(inum)
enum = int(enum)
if not inum in ion_steps:
ion_steps[inum] = obj()
#end if
ion_steps[inum][enum] = OutcarData(lines=iteration)
#end if
iteration.append(line)
#end for
del lines
del n
# read data from each iteration
if len(ion_steps)>0:
imax = array(ion_steps.keys(),dtype=int).max()
for inum,ion_step in ion_steps.iteritems():
ilast = inum==imax
if len(ion_step)>0:
emax = array(ion_step.keys(),dtype=int).max()
for enum,elec_step in ion_step.iteritems():
elast = enum==emax
elec_step.read(ilast,elast,all=False)
if ilast and elast:
self.transfer_from(elec_step)
#end if
#end for
#end if
#end for
#end if
self.ion_steps = ion_steps
#end def analyze_outcar
#end class VaspAnalyzer
```
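The OUTCAR readers above all follow the same pattern: scan forward to a marker with `VaspLines.advance_token`, then split fixed-offset lines into arrays. The standalone sketch below shows that scan-then-split idea on a fabricated excerpt (the numbers are invented, not real VASP output).
```python
# Sketch of the token-scan-then-split pattern used by the OUTCAR readers.
# The excerpt below is fabricated, not real VASP output.
lines = [
    ' some header text',
    ' E-fermi :   5.1234     XC(G=0):  -9.9  alpha+bet : -7.7',
    ' more text',
]

def advance_token(lines, start, token):
    """Return (index, line) of the first line at or after start containing token."""
    for index in range(start, len(lines)):
        if token in lines[index]:
            return index, lines[index]
    return start, None

_, line = advance_token(lines, 0, 'E-fermi')
efermi = float(line.split()[2])   # same slot used by read_outcar_fermi_energy
print(efermi)  # 5.1234
```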
#### File: src/GUI/qmcGUI.py
```python
import pygtk
pygtk.require('2.0')
import gtk
import pango
from Geometry import *
from Wavefunction import *
from Run import *
from xml.dom import getDOMImplementation
from xml.dom.ext import PrettyPrint
from os.path import basename
import os.path
viewerOkay = True
try:
import gtk.gtkgl
from OpenGL.GL import *
from OpenGL.GLU import *
except ImportError:
viewerOkay = False
if (viewerOkay):
from StructViewer import *
class MainManager (gtk.UIManager):
def __init__(self):
gtk.UIManager.__init__(self)
def Setup(self):
FileGroup = gtk.ActionGroup("File")
FileAction = gtk.Action("File", "_File", "File operations", None)
self.SaveAction = gtk.Action ("Save", "_Save", "Save qmcPACK input.", gtk.STOCK_SAVE)
self.SaveAsAction = gtk.Action ("SaveAs", "Save _As", "Save qmcPACK input with a new name", gtk.STOCK_SAVE_AS)
self.QuitAction = gtk.Action("Quit", "_Quit", "Exit qmcGUI", gtk.STOCK_QUIT)
FileGroup.add_action(FileAction)
FileGroup.add_action(self.SaveAction)
FileGroup.add_action(self.SaveAsAction)
FileGroup.add_action(self.QuitAction)
ViewGroup = gtk.ActionGroup("View")
ViewAction = gtk.Action("View", "_View", "View operations", None)
self.StructureAction = gtk.Action ("Structure", "_Structure", "View structure", None)
ViewGroup.add_action(ViewAction)
ViewGroup.add_action(self.StructureAction)
HelpGroup = gtk.ActionGroup("Help")
HelpAction = gtk.Action("Help", "_Help", "User assistance", gtk.STOCK_HELP)
self.AboutAction = gtk.Action ("About", "_About", "About qmcGUI", gtk.STOCK_ABOUT)
HelpGroup.add_action (HelpAction)
HelpGroup.add_action (self.AboutAction)
self.insert_action_group (FileGroup, 0)
self.insert_action_group (ViewGroup, -1)
self.insert_action_group (HelpGroup, -1)
descriptor = "<ui> "\
" <menubar name=\"MenuBar\"> "\
" <menu action=\"File\"> "\
" <menuitem action=\"Save\"/> "\
" <menuitem action=\"SaveAs\"/> "\
" <menuitem action=\"Quit\"/> "\
" </menu> "\
" <menu action=\"View\"> "\
" <menuitem action=\"Structure\"/> "\
" </menu> "\
" <menu action=\"Help\"> "\
" <menuitem action=\"About\"/> "\
" </menu> "\
" </menubar> "\
"</ui> "
self.add_ui_from_string(descriptor)
class GUIAboutDialog(gtk.AboutDialog):
def __init__(self):
gtk.AboutDialog.__init__(self)
self.set_name ("qmcPACK Input Builder")
self.set_version("0.1")
self.set_copyright("Copyright 2007, GNU Public License")
self.set_website("http://cms.mcc.uiuc.edu/qmcpack/index.php/QMCPACK_Wiki_Home")
self.set_authors(["<NAME> (<EMAIL>)"])
#############################################################
# function relativeto #
# return a the name of file2 relative to the path of file1 #
# Example: #
# file1 = "/home/kesler/BN/abc.xml" #
# file2 = "/home/kesler/pseudo/B.xml" #
# print relativeto (file1, file2) yields "../pseudo/B.xml" #
#############################################################
def relative2 (file1, file2):
dir1 = os.path.dirname(file1)
dir2 = os.path.dirname(file2)
dirlist1 = dir1.split ('/')
dirlist2 = dir2.split ('/')
if (dirlist1[0] == ''):
dirlist1.remove('')
if (dirlist2[0] == ''):
dirlist2.remove('')
print "dirlist1 = " + repr(dirlist1)
common = ""
i = 0
mindirs = min (len(dirlist1), len(dirlist2))
while ((i < mindirs) and (dirlist1[i] == dirlist2[i])):
common = common + '/' + dirlist1[i]
i = i+1
common = common
dirstrip = dir1.replace(common,'',1)
filestrip = file2.replace(common+'/','',1)
striplist = dirstrip.split('/')
rel = ""
for d in striplist:
if (d != ""):
rel = rel + "../"
rel = rel + filestrip
return rel
def relativeto (file1, file2):
dir = os.path.dirname (file2)
common = os.path.commonprefix((file1, dir))
dirstrip = dir.lstrip(common)
filestrip = file2.lstrip(common)
dirlist = dirstrip.split('/')
rel = ""
for d in dirlist:
if (d != ""):
rel = rel + "../"
rel = rel + filestrip
return rel
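# Editor's note, not part of the original qmcGUI source: str.lstrip strips a
# set of characters rather than a prefix, so relativeto above can mis-handle
# sibling directories that share leading characters. For the case shown in
# the comment block before relative2, the standard library gives the same
# answer directly:
#   os.path.relpath("/home/kesler/pseudo/B.xml", "/home/kesler/BN")
#   -> "../pseudo/B.xml"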
class GUI:
def __init__(self):
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_title ('qmcPACK Input Builder')
manager = MainManager()
self.window.add_accel_group (manager.get_accel_group())
manager.Setup()
mainMenu = manager.get_widget("/MenuBar")
mainBox = gtk.VBox()
mainBox.pack_start (mainMenu, False, False)
# Connect menu callbacks
manager.SaveAsAction.connect("activate", self.save_as_callback)
manager.SaveAction.connect("activate", self.save_callback)
self.About = GUIAboutDialog()
manager.AboutAction.connect("activate", self.about_callback)
self.window.connect("delete_event", self.delete)
notebook = gtk.Notebook()
notebook.set_tab_pos (gtk.POS_LEFT)
self.GeometryFrame = Geometry()
self.WavefunctionFrame = Wavefunction(self.GeometryFrame)
self.RunFrame = Run()
notebook.append_page (self.GeometryFrame, gtk.Label("Geometry"))
notebook.append_page (self.WavefunctionFrame, gtk.Label("Wave function"))
notebook.append_page (self.RunFrame, gtk.Label("Run"))
if (viewerOkay):
self.Viewer = StructureViewer()
notebook.append_page(self.Viewer, gtk.Label("Viewer"))
mainBox.pack_start (notebook, False, False, 5)
self.window.add (mainBox)
# Setup dialogs
buttons = (gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE,gtk.RESPONSE_OK)
self.SaveAsDialog = gtk.FileChooserDialog("Save qmcPACK input as", \
self.window, gtk.FILE_CHOOSER_ACTION_SAVE, buttons)
self.SaveAsDialog.hide()
self.Filename = None
self.window.show_all()
def about_callback (self, action):
self.About.run()
def save_as_callback(self, action):
if (self.SaveAsDialog.run() == gtk.RESPONSE_OK):
self.Filename = self.SaveAsDialog.get_filename()
self.write_qmcPACK(self.Filename)
self.window.set_title (basename(self.Filename))
self.SaveAsDialog.hide()
def save_callback(self, action):
if (self.Filename == None):
self.save_as_callback(action)
else:
self.write_qmcPACK (self.Filename)
def write_qmcPACK(self, filename):
impl = getDOMImplementation()
doc = impl.createDocument(None, "qmcPACK", None)
topElem = doc.documentElement
# Write out structural geometry elements
systemElem = doc.createElement("qmcsystem")
systemElem.setAttribute("dim", "3")
# Simulation cell information
cellElem = doc.createElement ("simulationcell")
latticeElem = doc.createElement ("parameter")
latticeElem.setAttribute("name", "lattice")
latticeText = doc.createTextNode("\n"+self.GeometryFrame.get_lattice_text()+" ")
latticeElem.appendChild (latticeText)
bcondsElem = doc.createElement ("bconds")
bcondsText = doc.createTextNode(" " + self.GeometryFrame.get_periodic_text() + " ")
bcondsElem.appendChild (bcondsText)
cellElem.appendChild (latticeElem)
cellElem.appendChild (bcondsElem)
# Atom position information
particleSetElem = doc.createElement ("particleset")
particleSetElem.setAttribute ("name", "i")
particleSetElem.setAttribute ("size", repr (self.GeometryFrame.get_num_atoms()))
systemElem.appendChild(cellElem)
# Ion types
ionData = self.GeometryFrame.get_atom_data()
bareIons = False
pseudoIons = False
for data in ionData:
groupElem = doc.createElement ("group")
groupElem.setAttribute("name", data[0])
chargeElem = doc.createElement("parameter")
chargeElem.setAttribute("name", "charge")
chargeElem.appendChild(doc.createTextNode(repr(data[2])))
valenceElem = doc.createElement("parameter")
valenceElem.setAttribute("name", "valence")
valenceElem.appendChild(doc.createTextNode(repr(data[2])))
zElem = doc.createElement("parameter")
zElem.setAttribute("name", "atomicnumber")
zElem.appendChild(doc.createTextNode(repr(data[1])))
groupElem.appendChild(chargeElem)
groupElem.appendChild(valenceElem)
groupElem.appendChild(zElem)
particleSetElem.appendChild (groupElem)
if ((data[3]=="Nonlocal PP") or (data[3] == "LocalPP")):
pseudoIons = True
else:
bareIons = True
# Add atom positions
posElem = doc.createElement("attrib")
posElem.setAttribute("name", "position")
posElem.setAttribute("datatype", "posArray")
posElem.setAttribute("condition", "1")
positions = self.GeometryFrame.AtomPos.get_atom_positions()
posText = "\n"
for pos in positions:
rowText = " " + ("%12.6f %12.6f %12.6f" % (pos[0], pos[1], pos[2])) + "\n"
posText = posText + rowText
posText = posText + " "
posElem.appendChild(doc.createTextNode(posText))
particleSetElem.appendChild(posElem)
# Add atom types
idElem = doc.createElement("attrib")
idElem.setAttribute ("name", "ionid")
idElem.setAttribute ("datatype", "stringArray")
idText = "\n "
ids = self.GeometryFrame.AtomPos.get_atom_types()
for id in ids:
if (id != None):
idText = idText + id + " "
idText = idText + "\n "
idElem.appendChild(doc.createTextNode(idText))
particleSetElem.appendChild(idElem)
systemElem.appendChild(particleSetElem)
# Add hamiltonian section
hamElem = doc.createElement("hamiltonian")
hamElem.setAttribute("name", "h0")
hamElem.setAttribute("type", "generic")
hamElem.setAttribute("targey", "e")
# e-e interaction
eeElem = doc.createElement("pairpot")
eeElem.setAttribute("name", "ElecElec")
eeElem.setAttribute("name", "coulomb")
eeElem.setAttribute("source", "e")
hamElem.appendChild(eeElem)
# e-ion interaction
# Pseudopotentials
if (pseudoIons):
pseudoElem = doc.createElement("pairpot")
pseudoElem.setAttribute("type", "pseudo")
pseudoElem.setAttribute("name", "PseudoPot")
pseudoElem.setAttribute("source", "i")
pseudoElem.setAttribute("wavefunction", "psi0")
pseudoElem.setAttribute("format", "xml")
for data in ionData:
if (data[3] == "Nonlocal PP"):
pElem = doc.createElement ("pseudo")
pElem.setAttribute("elementType", data[0])
if (data[4] == None):
print "Warning: nonlocal pseudopotential file not set."
else:
pElem.setAttribute("href", \
relative2(filename,data[4]))
pseudoElem.appendChild (pElem)
hamElem.appendChild(pseudoElem)
if (bareIons):
bareElem = doc.createElement("pairpot")
bareElem.setAttribute("name", "barIon")
bareElem.setAttribute("type", "coulomb")
hamElem.appendChild (bareElem)
systemElem.appendChild(hamElem)
topElem.appendChild (systemElem)
#########################
# Write run information #
#########################
for run in self.RunFrame.RunList:
runtype = run.TypeCombo.get_active_text()
runElem = doc.createElement ("qmc")
runElem.setAttribute("method", runtype)
runElem.setAttribute("target", "e")
paramList = run.get_param_list()
for param in paramList:
paramElem = doc.createElement("parameter")
paramElem.setAttribute("name", param[0])
textElem = doc.createTextNode (param[1])
paramElem.appendChild (textElem)
runElem.appendChild (paramElem)
topElem.appendChild (runElem)
file = open (filename, "w")
PrettyPrint (doc, file)
file.close()
def delete(self, widget, event=None):
gtk.main_quit()
return False
def run(self):
gtk.main()
if __name__ == "__main__":
gui = GUI()
gui.run()
```
#### File: qmcpack/utils/Energy.py
```python
from numpy import *
import sys
import math
def corr(i,x,mean,var):
N=len(x)
if var < 1.0e-10:#if the variance is 0 return an effectively infinity corr
return 1
corr=1.0/var*1.0/(N-i)*sum((x[0:N-i]-mean)*(x[i:N]-mean))
return corr
def Stats(x):
N=len(x)
mean=sum(x)/(N+0.0)
xSquared=x*x
var=sum(xSquared)/(N+0.0)-mean*mean
i=0
tempC=0.5
kappa=0.0
while (tempC>0 and i<(N-1)):
kappa=kappa+2.0*tempC
i=i+1
tempC=corr(i,x,mean,var)
if kappa == 0.0:
kappa = 1.0
Neff=(N+0.0)/(kappa+0.0)
if (Neff == 0.0):
Neff = 1.0
error=sqrt(var/Neff)
return (mean,var,error,kappa)
def Block (x, blockfactor):
N = len(x)
Nblocks = N / blockfactor
xb = zeros(Nblocks)
for i in range(0,Nblocks):
start = i*blockfactor
end = (i+1)*blockfactor
xb[i] = sum(x[start:end])/(blockfactor+0.0)
return xb
def MeanErrorString (mean, error):
if (mean!=0.0):
meanDigits = math.floor(math.log(abs(mean))/math.log(10))
else:
meanDigits=2
if (isnan(error)):
error = 0.0
if (error!=0.0):
rightDigits = -math.floor(math.log(error)/math.log(10))+1
else:
rightDigits=8
if (rightDigits < 0):
rightDigits = 0
formatstr = '%1.' + '%d' % (rightDigits) + 'f'
meanstr = formatstr % mean
errorstr = formatstr % error
return meanstr + ' +/- ' + errorstr
# return (meanstr, errorstr)
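# Illustrative usage of the helpers above (hypothetical data, not part of the original script):
# given a 1-D array x of correlated samples,
#   (mean, var, error, kappa) = Stats(x)
#   print "E = " + MeanErrorString(mean, error)
# prints the mean with an error bar corrected for the autocorrelation time kappa.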
file = open (sys.argv[1], 'r')
names = file.readline().split()
file.close()
s = loadtxt(sys.argv[1])
c = s.shape[0]
factor = 1.0
if (len(sys.argv) > 4):
c = int(sys.argv[4])
if (len(sys.argv) > 3):
factor = float(sys.argv[3])
s = s / factor
if len(sys.argv) > 2:
first = int(sys.argv[2])
else:
first = 20
#data = s[first:c,1]
#(avg, var, error, kapp) = Stats(data)
#print "Kappa = " + repr(kapp)
Ewald = 0.0
MPC = 0.0
KEcorr = 0.0
totE = 0.0
err = 0.0
for i in range(2,len(names)):
n = names[i];
data = s[first:c,i-1]
(avg, var, error, kapp) = Stats(data)
if (n == 'AcceptRatio' or n=='BlockCPU' or n=='BlockWeight'):
avg *= factor
error *= factor
if (n == 'ElecElec'):
Ewald = avg
if (n == 'MPC'):
MPC = avg
if (n == 'KEcorr'):
KEcorr = avg
if (n == 'LocalEnergy'):
totE = avg
err = error
if (n == 'LocalEnergy_sq'):
E = s[first:c,i-2]
(eavg, evar, eerr, ekapp) = Stats(E)
variance = avg/factor - eavg*eavg
n = 'Variance'
avg = variance
print '%-20s = %s' % (n, MeanErrorString(avg,error))
correction = KEcorr
if (Ewald !=0.0 and MPC != 0.0):
correction += MPC - Ewald
if (abs(correction) > 1.0e-12):
print '-----------------------------------------------------'
print '%-20s = %s' % ('Corrected energy', MeanErrorString(totE+correction,err))
#E = s[first:c,1];
#Vloc = s[first:c,2];
#KE = s[first:c,3];
#E_E = s[first:c,4];
#locPP = s[first:c,5];
#NLPP = s[first:c,6];
#IonIon = s[first:c,7];
#avg = mean (E);
#var = mean ((E-avg)*(E-avg));
#err = sqrt(var/c);
#N = len(E)
#print "Blockfactor Std Error"
#errlist = []
#for factor in range(10,N/2,10):
# Eblock = Block(E,factor)
# E2 = Eblock*Eblock
# avg = sum (Eblock)/(len(Eblock)+0.0)
# var = var=sum((Eblock-avg)*(Eblock-avg))/(len(Eblock)+0.0)
# error = sqrt (var/(len(Eblock)+0.0))
# errlist.append(error)
# print " %4d %8.6f" % (factor, error)
#(avg, var, error, kapp) = Stats(E)
#error = max(errlist)
#print "E = " + MeanErrorString(avg,error)
#(avg, var, error, kapp) = Stats(Vloc)
#print "Vloc = " + MeanErrorString(avg,error)
#(avg, var, error, kapp) = Stats(KE)
#print "KE = " + MeanErrorString(avg,error)
#(avg, var, error, kapp) = Stats(E_E)
#print "E_E = " + MeanErrorString(avg,error)
#(avg, var, error, kapp) = Stats(locPP)
#print "locPP = " + MeanErrorString(avg,error)
#(avg, var, error, kapp) = Stats(NLPP)
#print "NLPP = " + MeanErrorString(avg,error)
#(avg, var, error, kapp) = Stats(IonIon)
#print "IonIon = " + repr(avg)
```
#### File: hclib/tools/median.py
```python
import os
import sys
def median(mylist):
sorts = sorted(mylist)
length = len(sorts)
if not length % 2:
return (sorts[length / 2] + sorts[length / 2 - 1]) / 2.0
return sorts[length / 2]
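# Minimal usage note (assumed invocation): the script reads one number per line from stdin
# and prints the median, e.g. `seq 1 5 | python median.py` prints 3.0.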
l = []
for line in sys.stdin:
l.append(float(line))
print str(median(l))
```
#### File: hclib/tools/timeline.py
```python
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
class Task:
def __init__(self, start, lbl, event_id):
self.start = start
self.elapsed = -1
self.lbl = lbl
self.event_id = event_id
def set_elapsed(self, end_time):
assert self.elapsed == -1
self.elapsed = end_time - self.start
def normalize_start(self, min_time):
self.start = self.start - min_time
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
colors_iter = 0
colors = [('r', 'Red'),
('y', 'Yellow'),
('b', 'Blue'),
('g', 'Green'),
('c', 'Cyan'),
('m', 'Magenta'),
('#FA8072', 'Salmon'),
('#808000', 'Olive'),
('#FF00FF', 'Fuchsia')]
colors_dict = {}
for color in colors:
colors_dict[color[0]] = color[1]
labels = {}
max_timestamp = 0
min_timestamp = None
if len(sys.argv) != 2:
print('usage: python timeline.py timeline')
sys.exit(1)
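# Expected input format (inferred from the parser below): one event per line, whitespace-separated as
#   <timestamp> <thread> <event_type> <START|END> <event_id>
# e.g. a hypothetical pair of lines "1200 0 TASK START 42" ... "3400 0 TASK END 42"
# describes one task of 2200 time units on thread 0.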
fp = open(sys.argv[1], 'r')
total_events = 0
tasks = {}
line_no = 1
for line in fp:
tokens = line.split(' ')
total_events += 1
timestamp = int(tokens[0])
thread = int(tokens[1])
event_type = tokens[2]
transition = tokens[3]
event_id = int(tokens[4])
if event_type not in labels:
if colors_iter >= len(colors):
print('Ran out of colors, add some')
sys.exit(1)
labels[event_type] = colors[colors_iter][0]
colors_iter += 1
if not thread in tasks:
tasks[thread] = []
if transition == 'START':
tasks[thread].append(Task(timestamp, event_type, event_id))
if min_timestamp is None:
min_timestamp = timestamp
else:
min_timestamp = min(min_timestamp, timestamp)
elif transition == 'END':
found = None
for task in tasks[thread]:
if task.event_id == event_id:
assert found is None
found = task
assert not found is None
found.set_elapsed(timestamp)
max_timestamp = max(max_timestamp, timestamp)
else:
print('Unsupported transition "' + transition + '" at line ' + str(line_no))
sys.exit(1)
line_no = line_no + 1
fig = plt.figure(num=0, figsize=(18, 6), dpi=80)
width = 0.35 # the width of the bars: can also be len(x) sequence
color_counter = 0
ind = 0
x_labels = []
print('Elapsed time: ' + str(float(max_timestamp - min_timestamp) / 1000000.0) + ' ms')
print(str(total_events) + ' events in total')
for lbl in labels:
print(lbl + ': ' + colors_dict[labels[lbl]])
for thread in sorted(tasks.keys()):
x_labels.append(str(thread))
task_no = 1
for t in tasks[thread]:
t.normalize_start(min_timestamp)
if task_no % 5000 == 0:
print(str(thread) + ' ' + str(task_no) + '/' + str(len(tasks[thread])))
# Plot in microseconds
plt.barh(ind, float(t.elapsed) / 1000000.0, height=width,
left=(float(t.start) / 1000000.0), linewidth=1,
color=labels[t.lbl])
task_no = task_no + 1
ind = ind + width
plt.ylabel('Threads')
plt.xlabel('Time (ms)')
plt.yticks(np.arange(0, len(tasks.keys()), width) + width/2.,
x_labels)
plt.axis([ 0, float(max_timestamp-min_timestamp) / 1000000.0, 0, ind ])
plt.show()
``` |
{
"source": "jkchengh/s2m2",
"score": 3
} |
#### File: s2m2/models/auv.py
```python
from math import *
import numpy as np
import random
from scipy.integrate import odeint
from models.agent import *
from math import *
import numpy as np
from numpy.linalg import inv, norm
class AUV(Agent):
def __init__(self, size, velocity, k):
self.size = size
self.velocity = velocity
self.k = k
def model(self, q, t, u):
# x, y, z, roll, pitch, yaw
x, y, z, phi, theta, psi = q
v = u[0]
wx, wy, wz = u[1]
xdot = v * cos(psi) * cos(theta)
ydot = v * sin(psi) * cos(theta)
zdot = v * sin(theta)
phidot = wx + wy * sin(phi) * tan(theta) + wz * cos(phi) * tan(theta)
thetadot = wy * cos(phi) - wz * sin(phi)
psidot = wy * sin(phi) * (1 / cos(theta)) + wz * cos(phi) * (1 / cos(theta))
return [xdot, ydot, zdot, phidot, thetadot, psidot]
def controller(self, q, qref, uref):
x, y, z, phi, theta, psi = q
c_phi = cos(phi)
c_theta = cos(theta)
c_psi = cos(psi)
s_phi = sin(phi)
s_theta = sin(theta)
s_psi = sin(psi)
k_1, k_2, k_3 = self.k
R = np.array(
[[c_psi * c_theta, c_psi * s_theta * s_phi - s_psi * c_phi, c_psi * s_theta * c_phi + s_psi * s_phi],
[s_psi * c_theta, s_psi * s_theta * s_phi + c_psi * c_phi, s_psi * s_theta * c_phi - c_psi * s_phi],
[-s_theta, c_theta * s_phi, c_theta * c_phi]])
B_2 = np.array([[1, sin(phi) * tan(theta), cos(phi) * tan(theta)],
[0, cos(phi), -sin(phi)],
[0, sin(phi) * (1 / cos(theta)), cos(phi) * (1 / cos(theta))]])
phi_d, theta_d, psi_d = qref[3:6]
u_2d = np.array([[phi_d]])
v_d = uref[0]
# The error is calculated here. This is the error between the waypoint and
# the current state
e_x, e_y, e_z, e_phi, e_theta, e_psi = calculate_error(q, qref, uref, R)
u_2d = np.transpose(np.array(uref[1]))
B_2d = np.array([[1, sin(phi_d) * tan(theta_d), cos(phi_d) * tan(theta_d)],
[0, cos(phi_d), -sin(phi_d)],
[0, sin(phi_d) * (1 / cos(theta_d)), cos(phi_d) * (1 / cos(theta_d))]])
Q = np.transpose(np.array([0, e_z * v_d / k_2, e_y * v_d * cos(e_theta) / k_3])) # te
P_e = np.transpose(np.array([k_1 * sin(e_phi), k_2 * sin(e_theta), k_3 * sin(e_psi)]))
# This is the control law
gamma = 1
v_b = v_d * (cos(e_psi) * cos(e_theta) - 1) + e_x * gamma ** 2
u_2b = inv(B_2) * (Q + (B_2d - B_2) * u_2d + P_e)
u_1 = v_d + v_b
u_2 = u_2d + u_2b
u_2 = [u_2[0][0], u_2[1][1], u_2[2][2]]
return [u_1, u_2]
def bloating(self, n):
return 0
k1, k2, k3, kp = self.k
if n != 0:
return sqrt(4*n/k2)
else:
return 0
def run_model(self, q0, t, qref, uref):
q = [q0]
u0 = [0, [0, 0, 0]]
for i in range(0, len(t)):
t_step = [t[i - 1], t[i]]
dx, dy, dz, dphi, dtheta, dpsi = [random.uniform(-0.001, 0.001),
random.uniform(-0.001, 0.001),
random.uniform(-0.001, 0.001),
random.uniform(-0.001, 0.001),
random.uniform(-0.001, 0.001),
random.uniform(-0.001, 0.001)]
q1 = odeint(self.model, q0, t_step, args=(u0,)) + [dx, dy, dz, dphi, dtheta, dpsi]
q0 = q1[1]
q.append(q0)
u0 = self.controller(q0, qref[i], uref[i])
return q
def calculate_error(q, qref, uref, R):
X = np.array(q[0:3])
Theta = np.array(q[3:6])
# X_d = trajectory(t[idx], T, params)
X_d = np.array(qref[0:3])
Theta_d = np.array(qref[3:6])
v_d = uref[0]
wx,wy,wz = uref[1]
X_e = np.transpose(R)*(X_d - X)
Theta_e = Theta_d - Theta
return [X_e[0][0], X_e[1][1], X_e[2][2], Theta_e[0], Theta_e[1], Theta_e[2]]
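# Minimal usage sketch (hypothetical values, not from the original repo):
#   auv = AUV(size=1.0, velocity=1.0, k=(1.0, 1.0, 1.0))
#   t = [0.01 * i for i in range(100)]
#   qref = [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]] * len(t)   # reference states [x, y, z, phi, theta, psi]
#   uref = [[1.0, [0.0, 0.0, 0.0]]] * len(t)           # reference inputs [v, [wx, wy, wz]]
#   q = auv.run_model([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], t, qref, uref)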
``` |
{
"source": "jkcm/lagrangian-cset",
"score": 3
} |
#### File: jkcm/lagrangian-cset/met_utils.py
```python
import numpy as np
import warnings
from scipy import integrate
warnings.simplefilter("ignore")
p0 = 1000. # reference pressure, hPa
Rdry = 287. # gas const for dry air, J/K/kg
Rvap = 461. # gas const for water vapor, J/K/kg
eps = Rvap/Rdry - 1
cp = 1004. # cp_dry, specific heat of dry air at const pressure, J/K/kg
g = 9.81 # grav acceleration at sea level, m/s2
lv = 2.5*10**6 # latent heat of vaporization at 0C, J/kg
def smooth(x, window_len=11, window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) at both ends so that transient parts are minimized
at the beginning and end of the output signal. Courtesy of scipy-Cookbook
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett',
blackman', flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
t=linspace(-2,2,0.1)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
"""
if isinstance(x, list):
x = np.array(x)
if window_len % 2 == 0:
raise ValueError("please use odd-numbered window_len only.")
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 3:
return x
if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("Window is one of 'flat', 'hanning', "
"hamming', 'bartlett', 'blackman'")
s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]
if window == 'flat': # moving average
w = np.ones(window_len, 'd')
else:
w = eval('np.' + window + '(window_len)')
y = np.convolve(w/w.sum(), s, mode='valid')
return y[int(window_len/2):-int(window_len/2)]
def qvs_from_p_T(p, T):
"""p in Pa, T in K. return is in kg/kg
"""
es = 611.2*np.exp(17.67*(T-273.15)/(T-29.65))
qvs = 0.622*es/(p-0.378*es)
return qvs
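# Quick sanity check (illustrative): at p = 100000 Pa and T = 300 K the Bolton-type
# saturation vapor pressure above gives qvs of roughly 0.022 kg/kg (~22 g/kg).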
def qv_from_p_T_RH(p, T, RH):
"""p in Pa, T in K, Rh in pct. return is in kg/kg
"""
# es = 611.2*np.exp(17.67*(T-273.15)/(T-29.65))
qvs = qvs_from_p_T(p, T)
rvs = qvs/(1-qvs)
rv = RH/100. * rvs
qv = rv/(1+rv)
return qv
def tvir_from_T_w(T, w):
"""T in L, w in kg/kg"""
t_vir = T*(1+0.61*w)
return t_vir
def theta_from_p_T(p, T, p0=1000):
theta = T * (p0/p)**(Rdry/cp)
return theta
def get_liquid_water_theta(temp, theta, q_l):
"""temp = air temp (K) theta = pot temp, q_l = liquid water MR"""
theta_l = theta - (theta*lv*q_l/(temp*cp*1000))
return theta_l
def density_from_p_Tv(p, Tv):
return p/(Rdry*Tv)
#def thetae_from_theta_w_T(theta, w, T):
# """theta = pot temp in K, w = mr in kg/kg, T = temp in K"""
# returnb theta*np.exp(lv*)
def thetae_from_t_tdew_mr_p(t, tdew, mr, p):
"""From Bolton, 1980
t, tdew in K, mr in kg/kg, p in Pa
"""
t_lcl = 56 + 1/((1/(tdew-56))+(np.log(t/tdew)/800))
e = p*mr/(mr + 0.622)
K = 0.2854 # Rdry/cp
theta_lcl = t*(100000/(p-e))**K*(t/t_lcl)**(0.28*mr)
theta_e = theta_lcl*np.exp((3036/t_lcl - 1.78)*mr*(1+0.488*mr))
return theta_e
def get_LCL(t, t_dew, z):
if np.any(t_dew > t):
t_dew = np.minimum(t, t_dew)
# raise ValueError('dew point temp above temp, that\'s bananas')
return z + 125*(t - t_dew)
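# Worked example (illustrative): with t = 288 K, t_dew = 283 K and z = 0 m, the
# 125 m-per-K dewpoint-depression approximation above gives an LCL of about 625 m.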
def get_virtual_dry_static_energy(T, q, z):
return cp*T*(1+eps*q) + g*z
def get_moist_adiabatic_lapse_rate(T, p): #K and hPa
es = 611.2*np.exp(17.67*(T-273.15)/(T-29.65)) # Bolton formula, es in Pa
qs = 0.622*es/(p*100-0.378*es)
num = 1 + lv*qs/(Rdry*T)
denom = 1 + lv**2*qs/(cp*Rvap*T**2)
gamma = g/cp*(1-num/denom)
return gamma
def get_moist_adiabat(t, p, p_arr):
pass
def get_Ri_profile(u, v, q, T, z, T0=None, z0=None, q0=None, filt=False):
if filt:
u = smooth(u, window_len=filt)
v = smooth(v, window_len=filt)
q = smooth(q, window_len=filt)
T = smooth(T, window_len=filt)
z = smooth(z, window_len=filt)
if T0 is None:
T0 = T[0]
if z0 is None:
z0 = z[0]
if q0 is None:
q0 = q[0]
del_U_sq = u**2 + v**2
sv_0 = get_virtual_dry_static_energy(T0, q0, z0)
sv_hbl = get_virtual_dry_static_energy(T, q, z)
Ri_b = z*(2*g*(sv_hbl - sv_0))/(del_U_sq*(sv_hbl + sv_0 - g*z0 - g*z))
return Ri_b
def Ri_pbl_ht(u, v, q, T, z, T0=None, z0=None, q0=None, smooth=False):
indx = np.flatnonzero(z < 40)[-1] # avg all values below this for sfc values
if T0 is None:
T0 = np.nanmean(T[:indx])
if z0 is None:
z0 = np.nanmean(z[:indx])
if q0 is None:
q0 = np.nanmean(q[:indx])
Ri = get_Ri_profile(u, v, q, T, z, T0, z0, q0, smooth)
try:
indx = np.flatnonzero(np.array(Ri) > 0.25)[0]
z_pbl = z[indx]
if z_pbl > 4000:
raise IndexError
except IndexError:
return 0, float('nan')
return indx, z_pbl
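# Note: Ri_pbl_ht diagnoses the PBL top as the first level where the bulk Richardson
# number from get_Ri_profile exceeds the conventional critical value of 0.25, using
# near-surface (z < 40 m) averages as the reference state; heights above 4 km are rejected.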
def RH_fancy_pblht_1d( z, RH):
"""
at least 80% of the time, one could
identify a 'RH inversion base' as the altitude of max RH for which RH(zi
+ 300 m) - RH(zi) < -0.3. If such a layer does not exist below 4 km or
the top of the sounding, we say an inversion is not present.
"""
if z.shape != RH.shape:
raise ValueError('z and RH must have the same shape')
if len(z.shape) != 1: # height axis
raise ValueError('data has an invalid number of dimensions')
if not (z < 4000).any():
raise ValueError('must have data below 4000m')
z = smooth(z, window_len=5)
RH = smooth(RH, window_len=5)
#
# flipped = False
## if np.all(z[100:-100] != sorted(z[100:-100])): # not in ascending order
## if np.all(z[100:-100:-1] == sorted(z[100:-100])): # in descenting order
# if z[0] > z[-1]: # starts off higher
# if True:
# z = z[::-1]
# theta = theta[::-1]
# flipped = True
# else:
# raise ValueError("data not in ascending or descending order")
#
# %%
z_p300 = z + 300
# i_arr = np.empty_like(z)
RH_diff = np.empty_like(z)
for n, zpn in enumerate(z_p300):
i_arr = np.abs(z - zpn).argmin()
RH_diff[n] = RH[n] - RH[i_arr]
# print(RH_diff)
inv_cands = np.where(RH_diff > 30)[0]
RH_cands = RH[inv_cands]
if len(RH_cands) == 0:
if np.all(np.isnan(RH_diff)):
return {'z': np.nan, 'RH': np.nan, 'i': np.nan, 'inversion': False}
biggest_drop = np.nanargmax(RH_diff)
z_drop=z[biggest_drop]
RH_drop = RH[biggest_drop]
return {'z': z_drop, 'RH': RH_drop, 'i': biggest_drop, 'inversion': False}
best = np.argmax(RH_cands)
best_index = inv_cands[best]
RH_bot = RH[best_index]
z_bot = z[best_index]
return {'z': z_bot, 'RH': RH_bot, 'i': best_index, 'inversion': True}
# %%
# inv_cands = RH
# for n,i in enumerate(i_arr):
# print(z[n] - z[i])
def RH_50_pblht_1d(z, RH):
"""
Given z and RH, return height where RH drops below 50
"""
if z.shape != RH.shape:
raise ValueError('z and RH must have the same shape')
if len(z.shape) != 1: # height axis
raise ValueError('data has an invalid number of dimensions')
if not (z < 4000).any():
raise ValueError('must have data below 4000m')
z = smooth(z, window_len=5)
RH = smooth(RH, window_len=5)
nanfrac = sum(np.isnan(RH)/len(RH))
if np.nanmin(RH) > 50 or np.all(np.isnan(RH)) or np.nanmax(RH) < 50:
# print('no inversion')
return {"z": np.nan, "i": np.nan, 'inversion': False}
i_min = np.where(z == z[RH < 50].min())[0][0]
z_i = z[i_min]
# print(z_i)
# RH_i = RH[i_min]
return {"z": z_i, "i":i_min, 'inversion': True}
pass
def Peter2_inv(z, rh, theta):
idx = z<3000
rh = rh[idx]
theta = theta[idx]
z = z[idx]
grad_rh = np.gradient(rh, z)
grad2_rh = np.gradient(grad_rh, z)
grad2_rh[np.where(grad2_rh>0)] = 0 # looking for a strong decrease in rh grad
grad2_rh[np.where(grad_rh>0)] = 0 # must be negative grad in rh
grad_theta = np.gradient(theta, z)
grad2_theta = np.gradient(grad_theta, z)
grad2_theta[np.where(grad2_theta<0)] = 0 #looking for a strong increase in theta grad
grad2_theta[np.where(grad_theta<0)] = 0 #must be positive grad in theta
grad2_prod = grad2_rh*grad2_theta
return(z[np.argmin(grad2_prod)])
def moist_static_energy(t, z, q):
return cp*t + g*z + lv*q
def get_inversion_layer_2d(z, t, p, axis=0, handle_nans=False):
res_dict = {key: np.empty(z.shape[axis]) for key in ["z_top", "z_mid", "z_bot", "i_top", "i_mid", "i_bot",
"t_above_inv", "t_below_inv", "d_t_inv"]}
for i,(z_i,t_i,p_i) in enumerate(zip(z,t,p)):
try:
res = quick_inversion(z_i,t_i,p_i)
except ValueError as e:
if handle_nans:
res = {"z_top": np.nan, "z_mid": np.nan, "z_bot": np.nan,
"i_top": np.nan, "i_mid": np.nan, "i_bot": np.nan}
else:
import matplotlib.pyplot as plt
plt.plot(t_i[z_i<4000], z_i[z_i<4000])
raise e
for key, value in res.items():
res_dict[key][i] = value
return res_dict
def quick_inversion(z, t, p, smooth_t=False): # z in meters, t in K, p in hPa
#getting layers
gamma_moist = get_moist_adiabatic_lapse_rate(T=t, p=p)*1000
if smooth_t:
gamma = -np.gradient(smooth(t, window_len=31), z)*1000
else:
gamma = -np.gradient(t, z)*1000
gamma[np.gradient(z)>-1] = np.nan
gamma[z<330] = np.nan
gamma[z>3000] = np.nan
gamma[np.abs(gamma)>100] = np.nan
gamma_diff = (gamma-gamma_moist)/1000
return_dict = {"z_top": np.nan, "z_mid": np.nan, "z_bot": np.nan,
"i_top": np.nan, "i_mid": np.nan, "i_bot": np.nan,
"t_above_inv": np.nan, "t_below_inv": np.nan, "d_t_inv": np.nan}
#inversion center
i_mid = np.nanargmin(gamma) # middle of inversion is where the lapse rate is the strongest
if np.isnan(i_mid):
print('no i_mid')
return return_dict
z_mid = z[i_mid]
return_dict['i_mid'] = i_mid
return_dict['z_mid'] = z_mid
#inversion base
max_gap = gamma[i_mid] - gamma_moist[i_mid]
try: # first way to get the inversion base: where the lapse rate is sufficiently close to the moist adiabat again
z_bot = np.max(z[np.logical_and(z<z[i_mid], gamma-gamma_moist>max_gap/4)])
except ValueError as v: # no crossing of the max_gap/4 line go for smallest gap below zmid
cands = z<z[i_mid] # second way to get the inversion base: wherever it gets closest.
if not np.any(cands):
raise ValueError("no values below inversion middle!")
z_bot = z[cands][np.argmin(np.abs(gamma[cands]-gamma_moist[cands]))]
i_bot = np.argwhere(z==z_bot)[0][0]
return_dict['i_bot'] = i_bot
return_dict['z_bot'] = z_bot
#inversion top
top_candidates = np.logical_and(z>z[i_mid], gamma-gamma_moist>max_gap/4)
if np.any(top_candidates):
z_top = np.min(z[top_candidates]) # first way to get inversion top: where the lapse rate is sufficiently close to the moist adiabat again
i_top = np.argwhere(z==z_top)[0][0]
else: #second way to get inversion top: wherever it gets closest
cands = z>z[i_mid]
if not np.any(cands):
raise ValueError("no values above inversion middle!")
z_top = z[cands][np.argmin(np.abs(gamma[cands]-gamma_moist[cands]))]
i_top = np.argwhere(z==z_top)[0][0]
return_dict['i_top'] = i_top
return_dict['z_top'] = z_top
t_below_inv = t[i_bot]
i_inv = np.logical_and(z>z_bot, z<z_top)
d_t_inv = integrate.trapz(gamma_diff[i_inv], z[i_inv])
t_above_inv = t_below_inv + d_t_inv
return_dict['t_above_inv'] = t_above_inv
return_dict['t_below_inv'] = t_below_inv
return_dict['d_t_inv'] = d_t_inv
return return_dict
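# Usage sketch (hypothetical arrays): quick_inversion expects a descending aircraft-style
# profile (note the np.gradient(z) > -1 mask above), with z in m, t in K and p in hPa:
#   inv = quick_inversion(z, t, p, smooth_t=True)
#   print(inv['z_bot'], inv['z_top'], inv['d_t_inv'])
# It returns the inversion base/middle/top heights and the temperature jump across the layer.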
def calc_decoupling_and_inversion_from_sounding(sounding_dict, usetheta=False, get_jumps=True, smooth_t=True):
#Setting up variables
z = sounding_dict['GGALT']
theta = sounding_dict['THETA']
theta_e = sounding_dict['THETAE']
qv = sounding_dict['QV']
t = sounding_dict['ATX']
if 'PSXC' in sounding_dict.keys():
p = sounding_dict['PSXC']
else:
p = sounding_dict['PSX']
if not usetheta:
theta_l = sounding_dict['THETAL']
ql = sounding_dict['QL']
if np.all(np.isnan(ql)):
qt = qv
else:
qt = qv + ql
else:
theta_l = sounding_dict['THETA']
qt = qv
#failing quietly
buncha_nans = {"d_qt": np.nan, "d_theta_e": np.nan, "d_theta_l": np.nan,
"alpha_thetal": np.nan, "alpha_qt":np.nan, "alpha_thetae": np.nan,
"d_q_inv": np.nan, "d_t_inv": np.nan,
"t_below_inv": np.nan, "t_above_inv": np.nan, "q_below_inv": np.nan, "q_above_inv": np.nan,
"z_top": np.nan, "z_mid": np.nan, "z_bot": np.nan, "i_top": np.nan, "i_mid": np.nan, "i_bot": np.nan}
buncha_nans['lat'] = np.nanmean(sounding_dict['GGLAT'])
buncha_nans['lon'] = np.nanmean(sounding_dict['GGLON'])
buncha_nans['lon_p'] =-140 + 0.8*(buncha_nans['lon']+140) + 0.4*(buncha_nans['lat']-30)
buncha_nans['time'] = sounding_dict['TIME'][0]
inv_levs = quick_inversion(z, t, p, smooth_t=smooth_t)
buncha_nans.update(inv_levs)
z_top, z_mid, z_bot = inv_levs['z_top'], inv_levs['z_mid'], inv_levs['z_bot']
i_top, i_mid, i_bot = inv_levs['i_top'], inv_levs['i_mid'], inv_levs['i_bot']
# for key, value in inv_levs.items():
# buncha_nans[key] = value # better with dict.update()?
#jumps in q, t
# i_upper = np.logical_and(z<=z_top, z>=z_mid) #this is the upper half of the inversion
# if np.sum(i_upper) == 0:
# print("error: no upper inv layer: z_top: {} z_mid: {}".format(z_top, z_mid))
# return buncha_nans
# i_lower = np.logical_and(z>z_bot, z<z_mid) #this is the lower half of the inversion
q_above_inv = qt[i_top]
q_below_inv = qt[i_bot]
d_q_inv = q_above_inv - q_below_inv
buncha_nans['q_above_inv'] = q_above_inv
buncha_nans['q_below_inv'] = q_below_inv
buncha_nans['d_q_inv'] = d_q_inv
#decoupling ests
upper_25 = z_bot - (z_bot - min(z))/4. #top quarter of the MBL
u_i = np.logical_and(z > upper_25, z < z_bot)
lower_25 = min(z) + (z_bot - min(z))/4. #bottom quarter of the MBL
l_i = np.logical_and(z < lower_25, z > min(z))
ft_base = z_top
ft_top = ft_base + 500
l_ft = np.logical_and(z < ft_top, z > ft_base) #lower_free tropospheric values
if z_bot - min(z) < 300 or np.sum(l_ft) == 0:
return buncha_nans # can't calculate decouplng values if there is not enough MBL vertical or free-tropospheric
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
theta_e_sml = np.nanmean(theta_e[l_i])
theta_e_bzi = np.nanmean(theta_e[u_i])
theta_e_uzi = np.nanmean(theta_e[l_ft])
theta_l_sml = np.nanmean(theta_l[l_i])
theta_l_bzi = np.nanmean(theta_l[u_i])
theta_l_uzi = np.nanmean(theta_l[l_ft])
qt_sml = np.nanmean(qt[l_i])
qt_bzi = np.nanmean(qt[u_i])
qt_uzi = np.nanmean(qt[l_ft])
d_theta_e = theta_e_bzi - theta_e_sml
d_theta_l = theta_l_bzi - theta_l_sml
d_qt = qt_bzi - qt_sml
buncha_nans['d_qt'] = d_qt
buncha_nans['d_theta_l'] = d_theta_l
buncha_nans['d_theta_e'] = d_theta_e
alpha_thetal = (theta_l_bzi - theta_l_sml)/(theta_l_uzi - theta_l_sml)
alpha_qt = (qt_bzi - qt_sml)/(qt_uzi - qt_sml)
alpha_thetae = (theta_e_bzi - theta_e_sml)/(theta_e_uzi - theta_e_sml)
buncha_nans['alpha_thetal'] = alpha_thetal
buncha_nans['alpha_qt'] = alpha_qt
buncha_nans['alpha_thetae'] = alpha_thetae
return buncha_nans # hopefully no longer nans
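# As implemented above, the alpha_* decoupling parameters are the jump between the bottom
# and top quarters of the boundary layer divided by the jump from the bottom quarter to a
# 500 m layer just above the inversion top: values near 0 indicate a well-mixed layer,
# values near 1 a strongly decoupled one.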
def calc_decoupling_and_zi_from_flight_data(flight_data, usetheta=False):
var_list = ['GGLAT', 'GGLON', 'GGALT', 'RHUM', 'ATX', 'MR', 'THETAE', 'THETA', 'PSX', 'DPXC', 'PLWCC']
sounding_dict = {}
sounding_dict['TIME'] = flight_data.time.values
for i in var_list:
sounding_dict[i] = flight_data[i].values
if 'ATX' in var_list:
sounding_dict['ATX'] = sounding_dict['ATX'] + 273.15
sounding_dict['DENS'] = density_from_p_Tv(flight_data['PSX'].values*100, flight_data['TVIR'].values+273.15)
sounding_dict['QL'] = flight_data['PLWCC'].values/sounding_dict['DENS']
sounding_dict['THETAL'] = get_liquid_water_theta(
sounding_dict['ATX'], sounding_dict['THETA'], sounding_dict['QL'])
sounding_dict['QV'] = flight_data['MR'].values/(1+flight_data['MR'].values/1000)
decoupling_dict = calc_decoupling_and_inversion_from_sounding(sounding_dict, usetheta=usetheta)
# zi_dict = calc_zi_from_sounding(sounding_dict)
return {**decoupling_dict}
def calculate_LTS(t_700, t_1000):
"""calculate lower tropospheric stability
t_700: 700 hPa temperature in Kelvin
t_1000: 1000 hPa temperature in Kelvin
returns: lower tropospheric stability in Kelvin
"""
theta_700 = theta_from_p_t(p=700.0, t=t_700)
lts = theta_700-t_1000
return lts
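# Worked example (illustrative): with t_700 = 276 K and t_1000 = 290 K,
# theta_700 = 276 * (1000/700)**(Rdry/cp) ~ 305.6 K, so LTS ~ 15.6 K.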
def calculate_moist_adiabatic_lapse_rate(t, p):
"""calculate moist adiabatic lapse rate from pressure, temperature
p: pressure in hPa
t: temperature in Kelvin
returns: moist adiabatic lapse rate in Kelvin/m
"""
es = 611.2*np.exp(17.67*(t-273.15)/(t-29.65)) # Bolton formula, es in Pa
qs = 0.622*es/(p*100-0.378*es)
num = 1 + lv*qs/(Rdry*t)
denom = 1 + lv**2*qs/(cp*Rvap*t**2)
gamma = g/cp*(1-num/denom)
return gamma
def theta_from_p_t(p, t, p0=1000.0):
"""calculate potential temperature from pressure, temperature
p: pressure in hPa
t: temperature in Kelvin
returns: potential temperature in Kelvin
"""
theta = t * (p0/p)**(Rdry/cp)
return theta
def calculate_LCL(t, t_dew, z=0.0):
"""calculate lifting condensation level from temperature, dew point, and altitude
t: temperature in Kelvin
t_dew: dew point temperature in Kelvin
z: geopotential height in meters. defaults to 0
returns: lifting condensation level in meters
raises: ValueError if any dew points are above temperatures (supersaturation)
"""
if np.any(t_dew > t):
t_dew = np.minimum(t, t_dew)
# raise ValueError('dew point temp above temp, that\'s bananas')
return z + 125*(t - t_dew)
def calculate_EIS(t_1000, t_850, t_700, z_1000, z_700, r_1000):
"""calculate estimated inversion strength from temperatures, heights, relative humidities
t_1000, t_850, t_700: temperature in Kelvin at 1000, 850, and 700 hPa
z_1000, z_700: geopotential height in meters at 1000 and 700 hPa
r_1000: relative humidity in % at 1000 hPa
returns: estimated inversion strength (EIS) in Kelvin
"""
if hasattr(r_1000, '__iter__'):
r_1000[r_1000>100] = 100 # ignoring supersaturation for lcl calculation
t_dew = t_1000-(100-r_1000)/5.0
lcl = calculate_LCL(t=t_1000, t_dew=t_dew, z=z_1000)
lts = calculate_LTS(t_700=t_700, t_1000=t_1000)
gamma_850 = calculate_moist_adiabatic_lapse_rate(t=t_850, p=850)
eis = lts - gamma_850*(z_700-lcl)
return eis
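# Illustrative call (hypothetical sounding values), in the spirit of the Wood and
# Bretherton (2006) EIS definition (LTS minus a moist-adiabatic correction between
# the LCL and 700 hPa):
#   eis = calculate_EIS(t_1000=290., t_850=282., t_700=276.,
#                       z_1000=100., z_700=3100., r_1000=80.)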
# def DEC_inv_layer_from_sounding(sounding):
# rh = sounding['RHUM']
# z = sounding['GGALT']
# i_above_inv = np.where(rh<60)[0]
# z_above_inv = z[i_above_inv]
# if np.any(i_above_inv):
# z_mid = np.min(z_above_inv)
# else:
# z_mid = np.nan
# return {'z_mid': z_mid}
# def DEC_calc_decoupling_from_sounding(sounding_dict, usetheta=False, get_jumps=True, smooth_t=True):
# z = sounding_dict['GGALT']
# theta = sounding_dict['THETA']
# theta_e = sounding_dict['THETAE']
# qv = sounding_dict['QV']
# t = sounding_dict['ATX']
# if 'PSXC' in sounding_dict.keys():
# p = sounding_dict['PSXC']
# else:
# p = sounding_dict['PSX']
# if not usetheta:
# theta_l = sounding_dict['THETAL']
# ql = sounding_dict['QL']
# if np.all(np.isnan(ql)):
# qt = qv
# else:
# qt = qv + ql
# else:
# theta_l = sounding_dict['THETA']
# qt = qv
# zi = heffter_pblht_1D(z, theta)
# upper_25 = zi['z_bot'] - (zi['z_bot'] - min(z))/4.
# u_i = np.logical_and(z > upper_25, z < zi['z_bot'])
# lower_25 = min(z) + (zi['z_bot'] - min(z))/4.
# l_i = np.logical_and(z < lower_25, z > min(z))
# ft_base = zi['z_bot']+500
# ft_top = ft_base + 500
# l_ft = np.logical_and(z < ft_top, z > ft_base)
# buncha_nans = {"d_qt": np.nan, "d_theta_e": np.nan, "d_theta_l": np.nan,
# "alpha_thetal": np.nan, "alpha_qt":np.nan, "alpha_thetae": np.nan,
# "d_q_inv": np.nan, "d_t_inv": np.nan,
# "t_below_inv": np.nan, "t_above_inv": np.nan, "q_below_inv": np.nan, "q_above_inv": np.nan,
# "z_top": np.nan, "z_mid": np.nan, "z_bot": np.nan, "i_top": np.nan, "i_mid": np.nan, "i_bot": np.nan}
# if zi['z_bot'] - min(z) < 300 or np.sum(l_ft) == 0:
# return buncha_nans
# with warnings.catch_warnings():
# warnings.simplefilter("ignore", category=RuntimeWarning)
# theta_e_sml = np.nanmean(theta_e[l_i])
# theta_e_bzi = np.nanmean(theta_e[u_i])
# theta_e_uzi = np.nanmean(theta_e[l_ft])
# theta_l_sml = np.nanmean(theta_l[l_i])
# theta_l_bzi = np.nanmean(theta_l[u_i])
# theta_l_uzi = np.nanmean(theta_l[l_ft])
# qt_sml = np.nanmean(qt[l_i])
# qt_bzi = np.nanmean(qt[u_i])
# qt_uzi = np.nanmean(qt[l_ft])
# d_theta_e = theta_e_bzi - theta_e_sml
# d_theta_l = theta_l_bzi - theta_l_sml
# d_qt = qt_bzi - qt_sml
# alpha_thetal = (theta_l_bzi - theta_l_sml)/(theta_l_uzi - theta_l_sml)
# alpha_qt = (qt_bzi - qt_sml)/(qt_uzi - qt_sml)
# alpha_thetae = (theta_e_bzi - theta_e_sml)/(theta_e_uzi - theta_e_sml)
# # getting jumps across the inversion, old-fashioned way. bad for fuzzy inversions
# # z_inv = inv_layer_from_sounding(sounding_dict)['z_mid']
# # i_below_inv = np.logical_and(z > z_inv-200, z < z_inv)
# # i_above_inv = np.logical_and(z > z_inv, z < z_inv+200)
# # q_below_inv = np.nanmax(qt[i_below_inv])
# # q_above_inv = np.nanmin(qt[i_above_inv])
# # d_q_inv = q_above_inv - q_below_inv
# # t_below_inv = np.nanmin(theta[i_below_inv])
# # t_above_inv = np.nanmax(theta[i_above_inv])
# # d_t_inv = t_above_inv - t_below_inv
# if get_jumps:
# ### moist adiabatic way
# gamma_moist = get_moist_adiabatic_lapse_rate(T=t, p=p)*1000
# if smooth_t:
# gamma = -np.gradient(smooth(t, window_len=31), z)*1000
# else:
# gamma = -np.gradient(t, z)*1000
# gamma[np.gradient(z)>-1] = np.nan
# gamma[z<330] = np.nan
# gamma[z>3000] = np.nan
# gamma[np.abs(gamma)>100] = np.nan
# gamma_diff = (gamma-gamma_moist)/1000
# # import matplotlib.pyplot as plt
# # plt.plot(gamma, z)
# # plt.ylim([0, 3000])
# # raise ValueError('hahahah')
# i_mid = np.nanargmin(gamma)
# if np.isnan(i_mid):
# print('no i_mid')
# return buncha_nans
# z_mid = z[i_mid]
# max_gap = gamma[i_mid] - gamma_moist[i_mid]
# # z_bot = np.max(z[np.logical_and(z<z[i_mid], gamma>gamma_moist)])
# try:
# z_bot = np.max(z[np.logical_and(z<z[i_mid], gamma-gamma_moist>max_gap/4)])
# except ValueError as v: # no crossing of the max_gap/4 line go for smallest gap below zmid
# cands = z<z[i_mid]
# if not np.any(cands):
# raise ValueError("no values below inversion middle!")
# z_bot = z[cands][np.argmin(np.abs(gamma[cands]-gamma_moist[cands]))]
# i_bot = np.argwhere(z==z_bot)[0][0]
# i_bot = np.argwhere(z==z_bot)[0][0]
# top_candidates = np.logical_and(z>z[i_mid], gamma-gamma_moist>max_gap/4)
# if np.any(top_candidates):
# z_top = np.min(z[top_candidates])
# i_top = np.argwhere(z==z_top)[0][0]
# else:
# cands = z>z[i_mid]
# if not np.any(cands):
# import matplotlib.pyplot as plt
# plt.plot(theta, z)
# plt.figure()
# plt.plot(gamma, z)
# raise ValueError("no values above inversion middle!")
# z_top = z[cands][np.argmin(np.abs(gamma[cands]-gamma_moist[cands]))]
# i_top = np.argwhere(z==z_top)[0][0]
# i_upper = np.logical_and(z<=z_top, z>=z_mid)
# if np.sum(i_upper) == 0:
# print("error: no upper inv layer: z_top: {} z_mid: {}".format(z_top, z_mid))
# return buncha_nans
# i_lower = np.logical_and(z>z_bot, z<z_mid)
# q_above_inv = qt[i_top]
# q_below_inv = qt[i_bot]
# d_q_inv = q_above_inv - q_below_inv
# t_below_inv = t[i_bot]
# i_inv = np.logical_and(z>z_bot, z<z_top)
# d_t_inv = integrate.trapz(gamma_diff[i_inv], z[i_inv])
# t_above_inv = t_below_inv + d_t_inv
# else:
# i_bot, i_mid, i_top, z_bot, z_mid, z_top, q_above_inv, q_below_inv, t_above_inv, t_below_inv, d_q_inv, d_t_inv = np.nan, \
# np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
# return{"d_qt": d_qt, "d_theta_e": d_theta_e, 'd_theta_l': d_theta_l,
# "alpha_thetal": alpha_thetal, "alpha_qt":alpha_qt, "alpha_thetae": alpha_thetae,
# "d_q_inv": d_q_inv, "d_t_inv": d_t_inv,
# "t_below_inv": t_below_inv, "t_above_inv": t_above_inv, "q_below_inv": q_below_inv, "q_above_inv": q_above_inv,
# "z_top": z_top, "z_mid": z_mid, "z_bot": z_bot, "i_top": i_top, "i_mid": i_mid, "i_bot": i_bot }
# def DEC_calc_zi_from_sounding(sounding_dict):
# z = sounding_dict['GGALT']
# theta = sounding_dict['THETA']
# RH = sounding_dict['RHUM']
# T = sounding_dict['ATX']
# zi_dict = {}
# # zi_dict['Rich'] = mu.Ri_pbl_ht(u, v, q, T, z, smooth=True)
# zi_dict['RH50'] = RH_50_pblht_1d(z, RH)
# zi_dict['RHCB'] = RH_fancy_pblht_1d(z, RH)
# zi_dict['Heff'] = heffter_pblht_1D(z, theta)
# zi_dict['Heff']['T_bot'] = T[zi_dict['Heff']['i_bot']]
# zi_dict['Heff']['T_top'] = T[zi_dict['Heff']['i_top']]
# zi_dict['lat'] = np.nanmean(sounding_dict['GGLAT'])
# zi_dict['lon'] = np.nanmean(sounding_dict['GGLON'])
# zi_dict['time'] = sounding_dict['TIME'][0]
# zi_dict['lon_p'] = -140 + 0.8*(zi_dict['lon']+140) + 0.4*(zi_dict['lat']-30)
# return zi_dict
def DEC_heffter_pblht_1D(z, theta, find_top=False):
def moving_average(a, n=3):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
"""
[courtesy of jmcgibbon]
[made a little better by jkcm]
Given height and theta returns
the planetary boundary layer height from the Heffter criteria and the
index of that height in the z-array. Assumes the data is 1-D with the
axis being height. Assumes height in meters, and theta in K.
"""
if z.shape != theta.shape:
raise ValueError('z and theta must have the same shape')
if len(z.shape) != 1: # height axis
raise ValueError('data has an invalid number of dimensions')
if not (z < 4000).any():
raise ValueError('must have data below 4000m')
# z = moving_average(z, n=5)
# theta = moving_average(theta, n=3)
z = smooth(z, window_len=15)
theta = smooth(theta, window_len=15)
flipped = False
# if np.all(z[100:-100] != sorted(z[100:-100])): # not in ascending order
# if np.all(z[100:-100:-1] == sorted(z[100:-100])): # in descending order
if z[0] > z[-1]: # starts off higher
if True:
z = z[::-1]
theta = theta[::-1]
flipped = True
else:
raise ValueError("data not in ascending or descending order")
dtheta = np.diff(theta)
dz = np.diff(z)
dtheta_dz = np.zeros_like(dtheta)
valid = dz != 0
dtheta_dz[valid] = dtheta[valid]/dz[valid]
del valid
in_inversion = False
found_inversion = False
found_top = False
theta_bot = np.nan
z_bot = np.nan
i_bot = np.nan
theta_top = np.nan
i_top = np.nan
z_top = np.nan
for i in range(z.shape[0]-1): # exclude top where dtheta_dz isn't defined
if z[i] > 4000.:
# not allowed to have inversion height above 4km
break
if in_inversion:
# check if we're at PBL top
if theta[i] - theta_bot > 2:
found_inversion = True
theta_top = theta[i]
i_top = i
z_top = z[i]
if not find_top:
break
else:
break
#keep going up until we break the
# check if we're still in an inversion
# layer_dtheta_dz = (theta[i] - theta_bot)/(z[i]-z_bot)
# if layer_dtheta_dz > 0.005:
if dtheta_dz[i] > 0.005: # criterion for being in inversion
pass # still in inversion, keep going
else:
in_inversion = False
theta_bot = np.nan
z_bot = np.nan
i_bot = np.nan
else:
if dtheta_dz[i] > 0.005: # just entered inversion
theta_bot = theta[i]
i_bot = i
z_bot = z[i]
in_inversion = True
else:
# still not in inversion, keep going
pass
if found_inversion:
if flipped:
i_top = len(z)-i_top-1
i_bot = len(z)-i_bot-1
return {"z_top": z_top, "theta_top": theta_top, "i_top": i_top,
"z_bot": z_bot, "theta_bot": theta_bot, "i_bot": i_bot,
"inversion": True}
else:
# we didn't find a boundary layer height
# return height of highest dtheta_dz below 4000m
i_max = np.where(dtheta_dz == dtheta_dz[z[:-1] < 4000].max())[0][0]
z_max = z[i_max]
theta_max = theta[i_max]
if flipped:
i_max = len(z)-i_max-1
return {"z_top": z_max, "theta_top": theta_max, "i_top": i_max,
"z_bot": z_max, "theta_bot": theta_max, "i_bot": i_max,
"inversion": False}
def DEC_heffter_pblht_2d(z, theta, axis=0, handle_nans=False):
dummy = DEC_heffter_pblht_1D(np.arange(100), np.arange(100))
res_dict = {key: np.empty(z.shape[axis]) for key in dummy.keys()}
result = np.empty(z.shape[axis])
for i,(z_i,theta_i) in enumerate(zip(z, theta)):
try:
res = DEC_heffter_pblht_1D(z_i, theta_i)
except ValueError as e:
if handle_nans:
res = {"z_top": float('nan'), "theta_top": float('nan'), "i_top": float('nan'),
"z_bot": float('nan'), "theta_bot": float('nan'), "i_bot": float('nan'),
"inversion": False}
else:
raise e
for key, value in res.items():
res_dict[key][i] = value
return res_dict
```
#### File: jkcm/lagrangian-cset/unified_traj_data.py
```python
import utils
import met_utils
import lagrangian_case as lc
import datetime as dt
import numpy as np
import os
import xarray as xr
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = 12,5
import matplotlib.pyplot as plt
import glob
import pandas as pd
from itertools import cycle
from geographiclib.geodesic import Geodesic
import time
def xarray_from_trajectory(rfnum, trajnum, trajectory_type='500m_+72'):
tdump = utils.load_flight_trajectory(rfnum, trajnum, trajectory_type=trajectory_type)
ds = xr.Dataset.from_dataframe(tdump).drop(['tnum', 'gnum', 'age'])
ds = ds.rename({'dtime': 'time'})
# assigning global attributes
global_attrs = [
{'Title': "CSET Unified Trajectory Product"},
{'institution': "Department of Atmospheric Sciences, University of Washington"},
{'contact': "<EMAIL>"},
{'trajectory_setup': "Trajectories were run isobarically " +
"from an initialization height of 500m " +
"for 72 hours, using GDAS analysis met data"},
{'HYSPLIT': "Trajectories run using HYSPLIT (Hybrid Single "+
"Particle Lagrangian Integrated Trajectory Model). "+
"Acknowledgements to the NOAA Air Resources Laboratory "+
"(ARL) for the provision of the HYSPLIT transport and "+
"dispersion model used in this publication."},
{'references': "<NAME>., <NAME>, <NAME>., Stunder, "+
"B.J.B., <NAME>., and <NAME>., (2015). NOAA's "+
"HYSPLIT atmospheric transport and dispersion modeling "+
"system, Bull. Amer. Meteor. Soc., 96, 2059-2077, "+
"http://dx.doi.org/10.1175/BAMS-D-14-00110.1"},
{'CSET_flight': rfnum},
{'flight_trajectory': str(trajnum)}]
for i in global_attrs: # note: an OrderedDict would be tidier, but does not unpack in order
ds = ds.assign_attrs(**i)
# assigning variable attributes
var_attrs = {
'lon': {'long_name': 'longitude',
'units': 'degrees N'},
'lat': {'long_name': 'latitude',
'units': 'degrees E'},
'fhour': {'long_name': 'forecast_lead_time',
'units': 'hours'},
'pres': {'long_name':'trajectory_pressure',
'units': 'hPa'},
'height': {'long_name': 'trajectory_height_above_ground',
'units': 'meters'}}
for k,v in var_attrs.items():
ds[k] = ds[k].assign_attrs(**v)
return ds
def save_trajectory_to_netcdf(ds, location):
ds.to_netcdf(location)
def add_ERA_ens_to_trajectory(ds, box_degrees=2):
lats, lons, times = ds.lat.values, ds.lon.values, ds.time.values
space_index = int(np.round(box_degrees/0.3/2)) # go up/down/left/right this many pixels
ens_files = [os.path.join(utils.ERA_ens_source, i) for i in sorted(os.listdir(utils.ERA_ens_source))]
with xr.open_mfdataset(sorted(ens_files)) as data:
data = data.rename({'level': 'ens_level'})
ds.coords['number'] = data.coords['number']
ds.coords['ens_level'] = data.coords['ens_level']
if 'w' in data.data_vars.keys() and 'sp' in data.data_vars.keys():
data['dspdt'] = (data.sp.dims, np.gradient(data.sp, np.median(np.gradient(data.time.values)/np.timedelta64(1, 's')), axis=0),
{'units': "Pa s**-1", 'long_name': "Surface pressure tendency", 'standard_name': 'tendency_of_surface_air_pressure'})
data['w_corr'] = (data.w.dims, data.w - data.dspdt, {'units': data.w.units, 'long_name': 'Vertical velocity (sp-corrected)'})
for var in data.data_vars.keys():
var_shape = data[var].isel(time=0, latitude=0, longitude=0).shape
vals = []
for (lat, lon, time) in zip(lats, lons%360, times):
if lat > np.max(data.coords['latitude']) or lat < np.min(data.coords['latitude']) or \
lon > np.max(data.coords['longitude']) or lon < np.min(data.coords['longitude']):
print('out of range of data')
print(lat, lon, time)
vals.append(np.full(var_shape, float('nan'), dtype='float'))
continue
x = data[var].sel(longitude=slice(lon - box_degrees/2, lon + box_degrees/2),
latitude=slice(lat + box_degrees/2, lat - box_degrees/2))
z = x.sel(method='nearest', time=time, tolerance=np.timedelta64(1, 'h'))
#this applies a 2D gaussian the width of z, i.e. sigma=box_degrees
gauss_shape = tuple([v for v,i in zip(z.shape,z.dims) if i in ['latitude', 'longitude'] ])
gauss = utils.gauss2D(shape=gauss_shape, sigma=gauss_shape[-1])
filtered = z * gauss
# filtered2 = z.values * gauss
vals.append(filtered.sum(dim=('latitude', 'longitude')).values)
ds['ERA_ens_'+var] = (tuple(x for x in data[var].dims if x not in ['latitude', 'longitude']), np.array(vals), data[var].attrs)
# return ds
print('adding ensemble temperatures')
ens_temp_files = [os.path.join(utils.ERA_ens_temp_source, i) for i in sorted(os.listdir(utils.ERA_ens_temp_source))]
with xr.open_mfdataset(sorted(ens_temp_files)) as data:
data = data.rename({'level': 'ens_level'})
# ds.coords['number'] = data.coords['number']
# ds.coords['ens_level'] = data.coords['ens_level']
for var in data.data_vars.keys():
var_shape = data[var].isel(time=0, latitude=0, longitude=0).shape
vals = []
for (lat, lon, time) in zip(lats, lons, times):
if lat > np.max(data.coords['latitude']) or lat < np.min(data.coords['latitude']) or \
lon > np.max(data.coords['longitude']) or lon < np.min(data.coords['longitude']):
print('out of range of data')
print(lat, lon, time)
print(np.max(data.coords['latitude'].values), np.min(data.coords['latitude'].values))
print(np.max(data.coords['longitude'].values), np.min(data.coords['longitude'].values))
vals.append(np.full(var_shape, float('nan'), dtype='float'))
continue
x = data[var].sel(longitude=slice(lon - box_degrees/2, lon + box_degrees/2),
latitude=slice(lat + box_degrees/2, lat - box_degrees/2))
z = x.sel(method='nearest', time=time, tolerance=np.timedelta64(1, 'h'))
#this applies a 2D gaussian the width of z, i.e. sigma=box_degrees
gauss_shape = tuple([v for v,i in zip(z.shape,z.dims) if i in ['latitude', 'longitude'] ])
gauss = utils.gauss2D(shape=gauss_shape, sigma=gauss_shape[-1])
filtered = z * gauss
# filtered2 = z.values * gauss
vals.append(filtered.sum(dim=('latitude', 'longitude')).values)
ds['ERA_ens_'+var] = (tuple(x for x in data[var].dims if x not in ['latitude', 'longitude']), np.array(vals), data[var].attrs)
return ds
def add_ERA_to_trajectory(ds, box_degrees=2):
"""Retrieve ERA5 data in a box around a trajectory
Assumes ERA5 data is 0.3x0.3 degrees
Returns an xarray Dataset
"""
lats, lons, times = ds.lat.values, ds.lon.values, ds.time.values
space_index = int(np.round(box_degrees/0.3/2)) # go up/down/left/right this many pixels
unique_days = set([utils.as_datetime(i).date() for i in times])
files = [os.path.join(utils.ERA_source, "ERA5.pres.NEP.{:%Y-%m-%d}.nc".format(i))
for i in unique_days]
flux_files = [os.path.join(utils.ERA_source, "ERA5.flux.NEP.{:%Y-%m-%d}.nc".format(i))
for i in unique_days]
with xr.open_mfdataset(sorted(files)) as data:
#return_ds = xr.Dataset(coords={'time': ds.coords['time'], 'level': data.coords['level']})
ds.coords['level'] = data.coords['level']
#adding in q:
T = data['t'].values
RH = data['r'].values
p = np.broadcast_to(data.coords['level'].values[None, :, None, None], T.shape)*100
q = utils.qv_from_p_T_RH(p, T, RH)
data['q'] = (('time', 'level', 'latitude', 'longitude'), q)
data['q'] = data['q'].assign_attrs({'units': "kg kg**-1",
'long_name': "specific_humidity",
'dependencies': 'ERA_t, ERA_p, ERA_r'})
MR = q/(1-q)
data['MR'] = (('time', 'level', 'latitude', 'longitude'), MR)
data['MR'] = data['MR'].assign_attrs({'units': "kg kg**-1",
'long_name': "mixing_ratio",
'dependencies': 'ERA_t, ERA_p, ERA_r'})
# adding gradients in for z, t, and q. Assuming constant grid spacing.
for var in ['t', 'q', 'z', 'u', 'v', 'MR']:
[_,_,dvardj, dvardi] = np.gradient(data[var].values)
dlatdy = 360/4.000786e7 # degrees lat per meter y
def get_dlondx(lat) : return(360/(np.cos(np.deg2rad(lat))*4.0075017e7))
lat_spaces = np.diff(data.coords['latitude'].values)
lon_spaces = np.diff(data.coords['longitude'].values)
assert(np.allclose(lat_spaces, -0.3, atol=0.01) and np.allclose(lon_spaces, 0.3, atol=0.05))
dlondi = np.mean(lon_spaces)
dlatdj = np.mean(lat_spaces)
dlondx = get_dlondx(data.coords['latitude'].values)
dvardx = dvardi/dlondi*dlondx[None,None,:,None]
dvardy = dvardj/dlatdj*dlatdy
data['d{}dx'.format(var)] = (('time', 'level', 'latitude', 'longitude'), dvardx)
data['d{}dy'.format(var)] = (('time', 'level', 'latitude', 'longitude'), dvardy)
grad_attrs = {'q': {'units': "kg kg**-1 m**-1",
'long_name': "{}_gradient_of_specific_humidity",
'dependencies': "ERA_t, ERA_p, ERA_r"},
't': {'units': "K m**-1",
'long_name': "{}_gradient_of_temperature",
'dependencies': "ERA_t"},
'z': {'units': "m**2 s**-2 m**-1",
'long_name': "{}_gradient_of_geopotential",
'dependencies': "ERA_z"},
'u': {'units': "m s**-1 m**-1",
'long_name': "{}_gradient_of_zonal_wind",
'dependencies': "ERA_u"},
'v': {'units': "m s**-1 m**-1",
'long_name': "{}_gradient_of_meridional_wind",
'dependencies': "ERA_v"},
'MR': {'units': "kg kg**-1 m**-1",
'long_name': "{}_gradient_of_mixing_ratio",
'dependencies': "ERA_t, ERA_p, ERA_r"}}
for key, val in grad_attrs.items():
for (n, drn) in [('x', 'eastward'), ('y', 'northward')]:
attrs = val.copy()
var = 'd{}d{}'.format(key, n)
attrs['long_name'] = attrs['long_name'].format(drn)
data[var] = data[var].assign_attrs(attrs)
for var in data.data_vars.keys():
vals = []
for (lat, lon, time) in zip(lats, lons%360, times):
if lat > np.max(data.coords['latitude']) or lat < np.min(data.coords['latitude']) or \
lon > np.max(data.coords['longitude']) or lon < np.min(data.coords['longitude']):
print('out of range of data')
print(lat, lon, time)
vals.append(np.full_like(data.coords['level'], float('nan'), dtype='float'))
continue
x = data[var].sel(longitude=slice(lon - box_degrees/2, lon + box_degrees/2),
latitude=slice(lat + box_degrees/2, lat - box_degrees/2))
# print(time)
# print(x.time[0])
z = x.sel(method='nearest', tolerance=np.timedelta64(1, 'h'), time=time)
#z = y.sel(method='nearest', tolerance=50, level=pres)
#this applies a 2D gaussian the width of z, i.e. sigma=box_degrees
# print(z.shape)
gauss = utils.gauss2D(shape=z.shape[1:], sigma=z.shape[0])
filtered = z.values * gauss
vals.append(np.sum(filtered, axis=(1,2)))
ds['ERA_'+var] = (('time', 'level'), np.array(vals))
ds['ERA_'+var] = ds['ERA_'+var].assign_attrs(data[var].attrs)
t_1000 = ds.ERA_t.sel(level=1000).values
theta_700 = met_utils.theta_from_p_T(p=700, T=ds.ERA_t.sel(level=700).values)
LTS = theta_700-t_1000
ds['ERA_LTS'] = (('time'), np.array(LTS))
ds['ERA_LTS'] = ds['ERA_LTS'].assign_attrs(
{"long_name": "Lower tropospheric stability",
"units": "K",
"_FillValue": "NaN"})
t_dew = t_1000-(100-ds.ERA_r.sel(level=1000).values)/5
lcl = met_utils.get_LCL(t=t_1000, t_dew=t_dew, z=ds.ERA_z.sel(level=1000).values/9.81)
z_700 = ds.ERA_z.sel(level=700).values/9.81
gamma_850 = met_utils.get_moist_adiabatic_lapse_rate(ds.ERA_t.sel(level=850).values, 850)
eis = LTS - gamma_850*(z_700-lcl)
ds['ERA_EIS'] = (('time'), np.array(eis))
ds['ERA_EIS'] = ds['ERA_EIS'].assign_attrs(
{"long_name": "Estimated inversion strength",
"units": "K",
"_FillValue": "NaN"})
with xr.open_mfdataset(sorted(flux_files)) as flux_data:
for var in flux_data.data_vars.keys():
# if var not in ['sshf', 'slhf']:
# continue
vals = []
for (lat, lon, time) in zip(lats, lons%360, times):
if lat > np.max(flux_data.coords['latitude']) or lat < np.min(flux_data.coords['latitude']) or \
lon > np.max(flux_data.coords['longitude']) or lon < np.min(flux_data.coords['longitude']):
print('out of range of data')
print(lat, lon, time)
vals.append(float('nan'))
continue
x = flux_data[var].sel(longitude=slice(lon - box_degrees/2, lon + box_degrees/2),
latitude=slice(lat + box_degrees/2, lat - box_degrees/2))
z = x.sel(method='nearest', time=time, tolerance=np.timedelta64(1, 'h'))
gauss = utils.gauss2D(shape=z.shape, sigma=z.shape[0])
filtered = z.values * gauss
vals.append(np.sum(filtered))
ds['ERA_'+var] = (('time'), np.array(vals))
ds['ERA_'+var] = ds['ERA_'+var].assign_attrs(flux_data[var].attrs)
return ds
def add_MERRA_to_trajectory(ds, box_degrees=2):
lats, lons, times = ds.lat.values, ds.lon.values, utils.as_datetime(ds.time.values)
unique_days = set([utils.as_datetime(i).date() for i in times])
files = [os.path.join(utils.MERRA_source, "svc_MERRA2_400.inst3_3d_aer_Nv.{:%Y%m%d}.nc4".format(i))
for i in unique_days]
with xr.open_mfdataset(sorted(files)) as data:
# data = xr.open_mfdataset(sorted(files))
# if True:
ds.coords['lev'] = data.coords['lev']
for var in data.data_vars.keys():
# var = 'RH'
# if True:
vals = []
for (lat, lon, time) in zip(lats, lons, times):
# lat, lon, time = lats[1], lons[1], times[1]
# if True:
time = time.replace(tzinfo=None)
x = data[var].sel(lon=slice(lon - box_degrees/2, lon + box_degrees/2),
lat=slice(lat - box_degrees/2, lat + box_degrees/2))
y = x.sel(method='nearest', tolerance=dt.timedelta(minutes=119), time=time)
z = y.sel(method='nearest', tolerance=50, level=pres)
#this applies a 2D gaussian the width of z, i.e. sigma=box_degrees
gauss = utils.gauss2D(shape=z.shape[1:], sigma=z.shape[1])
filtered = z.values * gauss
vals.append(np.sum(filtered, axis=(1,2)))
ds['MERRA2_'+var] = (('time', 'level'), np.array(vals))
ds['MERRA2_'+var] = ds['MERRA2_'+var].assign_attrs(data[var].attrs)
return ds
# var_list = ['SO4', 'RH']
# MERRA_ds = utils.get_MERRA_data(var_list=var_list, lats=lats, lons=lons, times=times,
# pressures=pressures, box_degrees=2)
# ds = xr.merge([ds, MERRA_ds.rename({'RH': 'MERRA_RH', 'SO4': 'MERRA_SO4', 'time': 'dtime'})])
# t_data.append(ds)
def add_speeds_to_trajectories(ds):
"""Add speed variables to trajectory. used centered difference of distances traveled
"""
lats, lons, times = ds.lat.values, ds.lon.values, ds.time.values
heading_starts, heading_ends, seg_speeds = [], [], []
for i in range(len(lats)-1):
geod = Geodesic.WGS84.Inverse(lats[i], lons[i], lats[i+1], lons[i+1])
dtime = (times[i+1]-times[i])/np.timedelta64(1, 's')
heading_starts.append(geod['azi1'])
heading_ends.append(geod['azi2'])
seg_speeds.append(geod['s12']/dtime)
#speeds are centered differences, except at the start and end, where they are the speeds of
#the first and last trajectory segments
#headings are the average of the end azimuth of the previous segment and the start azimuth of the next geodesic segment,
#except at the start and end, where they are just the start/end azimuths of the first/last geodesic
speeds = np.mean(np.vstack([seg_speeds+[seg_speeds[-1]],[seg_speeds[0]]+seg_speeds]), axis=0)
# headings = np.mean(np.vstack([[heading_starts[0]]+heading_ends, heading_starts+[heading_ends[-1]]]), axis=0) THIS HAD A BUG
def radial_mean(h1, h2):
diff = ((h2-h1)+180)%360-180
return h1 + diff/2
headings = radial_mean(np.array([heading_starts[0]]+heading_ends), np.array(heading_starts+[heading_ends[-1]]))
u = speeds*np.cos(np.deg2rad(90-headings))
v = speeds*np.sin(np.deg2rad(90-headings))
ds['traj_u'] = (('time'), u)
ds['traj_v'] = (('time'), v)
ds['traj_hdg'] = (('time'), headings)
ds['traj_spd'] = (('time'), speeds)
return ds
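# Illustrative check (not part of the pipeline) of the circular averaging used for
# headings above: averaging across the 0/360 wraparound should give ~0 deg, not 180 deg.
# Values are made up for demonstration.
# >>> h1, h2 = np.array([350.0]), np.array([10.0])
# >>> diff = ((h2 - h1) + 180) % 360 - 180
# >>> (h1 + diff / 2) % 360
# array([0.])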
def add_advection_to_trajectory(ds):
"""Add advection to trajectory after adding ERA data
"""
names = dict(u='ERA_u', v='ERA_v', u_t='traj_u', v_t='traj_v',
dtdx='ERA_dtdx', dtdy='ERA_dtdy', dqdx='ERA_dqdx', dqdy='ERA_dqdy', dMRdx='ERA_dMRdx', dMRdy='ERA_dMRdy')
assert np.all([i in ds.data_vars.keys() for i in names.values()])
rel_adv_of_T = -((ds[names['u']].values-ds[names['u_t']].values[:, None])*ds[names['dtdx']].values + \
(ds[names['v']].values-ds[names['v_t']].values[:, None])*ds[names['dtdy']].values)
rel_adv_of_q = -((ds[names['u']].values-ds[names['u_t']].values[:, None])*ds[names['dqdx']].values + \
(ds[names['v']].values-ds[names['v_t']].values[:, None])*ds[names['dqdy']].values)
rel_adv_of_MR = -((ds[names['u']].values-ds[names['u_t']].values[:, None])*ds[names['dMRdx']].values + \
(ds[names['v']].values-ds[names['v_t']].values[:, None])*ds[names['dMRdy']].values)
T_adv_attr = {'units': "K s**-1",
'long_name': "trajectory_relative_advection_of_temperature",
'dependencies': 'ERA_t, traj_u, traj_v, ERA_u, ERA_v'}
q_adv_attr = {'units': "kg kg**-1 s**-1",
'long_name': "trajectory_relative_advection_of_specific_humidity",
'dependencies': 'ERA_q, traj_u, traj_v, ERA_u, ERA_v'}
MR_adv_attr = {'units': "kg kg**-1 s**-1",
'long_name': "trajectory_relative_advection_of_mixing ratio",
'dependencies': 'ERA_q, traj_u, traj_v, ERA_u, ERA_v'}
ds['ERA_T_adv'] = (('time', 'level'), rel_adv_of_T)
ds['ERA_T_adv'] = ds['ERA_T_adv'].assign_attrs(**T_adv_attr)
ds['ERA_q_adv'] = (('time', 'level'), rel_adv_of_q)
ds['ERA_q_adv'] = ds['ERA_q_adv'].assign_attrs(**q_adv_attr)
ds['ERA_MR_adv'] = (('time', 'level'), rel_adv_of_MR)
ds['ERA_MR_adv'] = ds['ERA_MR_adv'].assign_attrs(**MR_adv_attr)
return ds
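# For reference, the trajectory-relative advection computed above is
#   adv(X) = -[(u - u_traj) * dX/dx + (v - v_traj) * dX/dy]
# evaluated on each pressure level, so positive values indicate that the large-scale
# flow is importing higher X into the moving air column.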
def add_upwind_profile_to_trajectory(ds, dist=200, box_avg=2):
"""Add 'upwind' profile (not a true profile since the location varies with height)
for alternative nudging method.
Add only T_upwind, q_upwind, and MR_upwind vars
"""
T_upwind = np.full_like(ds.ERA_t, np.nan)
q_upwind = np.full_like(ds.ERA_q, np.nan)
MR_upwind = np.full_like(ds.ERA_MR, np.nan)
for i, t in enumerate(ds.time):
for j, l in enumerate(ds.level):
u = ds.ERA_u.sel(time=t, level=l)
v = ds.ERA_v.sel(time=t, level=l)
def add_ERA_sfc_data(ds, box_degrees=2):
lats, lons, times = ds.lat.values, ds.lon.values, ds.time.values
space_index = int(np.round(box_degrees/0.3/2)) # go up/down/left/right this many pixels
unique_days = set([utils.as_datetime(i).date() for i in times])
sfc_files = [os.path.join(utils.ERA_source, "ERA5.sfc.NEP.{:%Y-%m-%d}.nc".format(i))
for i in unique_days]
with xr.open_mfdataset(sorted(sfc_files)) as data:
for var in data.data_vars.keys():
vals = []
for (lat, lon, time) in zip(lats, lons%360, times):
if lat > np.max(data.coords['latitude']) or lat < np.min(data.coords['latitude']) or \
lon > np.max(data.coords['longitude']) or lon < np.min(data.coords['longitude']):
print('out of range of data')
print(lat, lon, time)
vals.append(float('nan'))
continue
x = data[var].sel(longitude=slice(lon - box_degrees/2, lon + box_degrees/2),
latitude=slice(lat + box_degrees/2, lat - box_degrees/2))
z = x.sel(method='nearest', tolerance=np.timedelta64(59, 'm'), time=time)
gauss = utils.gauss2D(shape=z.shape, sigma=z.shape[0])
filtered = z.values * gauss
vals.append(np.sum(filtered))
ds['ERA_'+var] = (('time'), np.array(vals))
ds['ERA_'+var] = ds['ERA_'+var].assign_attrs(data[var].attrs)
# lhf = ds['ERA_ie'].values*2264705
# ds['ERA_ilhf'] = (('time'), lhf)
# ds['ERA_ilhf'] = ds['ERA_ilhf'].assign_attrs({"long_name": "Instantaneous surface latent heat flux",
# "units": "W m**-2",
# "_FillValue": "NaN"})
# ds['ERA_'+var] = ds['ERA_'+var]
return ds
def add_GOES_obs(ds):
#rfnum = ds['']
return ds
def add_MODISPBL_to_trajectory(ds, box_degrees=3):
lats, lons, times = ds.lat.values, ds.lon.values, ds.time.values
MODIS_day_idx = np.argwhere([i.hour == 23 for i in utils.as_datetime(times)]).squeeze()
MODIS_night_idx = np.argwhere([i.hour == 11 for i in utils.as_datetime(times)]).squeeze()
# dayfile = '/home/disk/eos4/jkcm/Data/CSET/Ryan/Daily_1x1_JHISTO_CTH_c6_day_v2_calboxes_top10_Interp_hif_zb_2011-2016.nc'
dayfile = '/home/disk/eos4/jkcm/Data/CSET/Ryan/Daily_1x1_JHISTO_CTH_c6_day_v2_calboxes_top10_Interp_hif_zb_2011-2016_corrected.nc'
nightfile = '/home/disk/eos4/jkcm/Data/CSET/Ryan/Daily_1x1_JHISTO_CTH_c6_night_v2_calboxes_top10_Interp_hif_zb_2011-2016.nc'
vals = []
stds = []
nanfrac = []
for i in range(len(times)):
if i in MODIS_day_idx:
f = dayfile
elif i in MODIS_night_idx:
f = nightfile
else:
vals.append(np.nan)
stds.append(np.nan)
nanfrac.append(np.nan)
continue
with xr.open_dataset(f) as data:
lat, lon, time = lats[i], lons[i], utils.as_datetime(times[i])
t_idx = np.argwhere(np.logical_and(data['days'].values == time.timetuple().tm_yday,
data['years'].values == time.year))[0][0]
x = data['cth'].sel(longitude=slice(lon - box_degrees/2, lon + box_degrees/2),
latitude=slice(lat + box_degrees/2, lat - box_degrees/2))
z = x.isel(time=t_idx).values
vals.append(np.nanmean(z))
stds.append(np.nanstd(z))
nanfrac.append(np.sum(np.isnan(z))/z.size)
ds['MODIS_CTH'] = (('time'), np.array(vals))
ds['MODIS_CTH_std'] = (('time'), np.array(stds))
ds['MODIS_CTH_nanfrac'] = (('time'), np.array(nanfrac))
return ds
def make_trajectory(rfnum, trajnum, save=False, trajectory_type='500m_+72'):
ds = xarray_from_trajectory(rfnum, trajnum, trajectory_type)
ds = add_speeds_to_trajectories(ds)
print("adding ERA...")
ds = add_ERA_to_trajectory(ds)
print('adding advection...')
ds = add_advection_to_trajectory(ds)
print('adding ERA sfc data...')
ds = add_ERA_sfc_data(ds)
print('adding ERA ensemble data...')
ds = add_ERA_ens_to_trajectory(ds)
print('adding GOES data...')
ds = add_GOES_obs(ds)
print("adding MODIS...")
ds = add_MODISPBL_to_trajectory(ds)
# print("adding MERRA...")
# ds = add_MERRA_to_trajectory(ds)
if save:
save_trajectory_to_netcdf(ds, save)
return ds
if __name__ == "__main__":
force_override = True
for case_num, case in lc.all_cases.items():
print('working on case {}'.format(case_num))
# if case_num not in [6, 10]:
# continue
flight = case['TLC_name'].split("_")[1][:4].lower()
traj_list = case['TLC_name'].split('_')[2].split('-')
for dirn in ['forward', 'backward']:
nc_dirstring = '48h_backward' if dirn == 'backward' else '72h_forward'
for traj in traj_list:
name = os.path.join(utils.trajectory_netcdf_dir, "{}_{}_{}.nc".format(flight, nc_dirstring, traj))
print("working on {}...".format(os.path.basename(name)))
if os.path.exists(name):
print("already exists!")
if not force_override:
continue
else:
print('overriding')
os.rename(name, os.path.join(utils.trajectory_netcdf_dir, 'old', "{}_{}_{}.nc".format(flight, nc_dirstring, traj)))
# ds = make_trajectory(rfnum=flight, trajnum=float(traj), save=name);
trajectory_type = '500m_-48' if dirn == 'backward' else '500m_+72'
ds = make_trajectory(rfnum=flight, trajnum=float(traj), save=name, trajectory_type=trajectory_type);
#ds = add_ERA_sfc_data(ds)
#ds = make_trajectory(rfnum='rf06', trajnum=2.3, save=False)
#save_trajectory_to_netcdf(ds, r'/home/disk/eos4/jkcm/Data/CSET/model_forcings/rf06_traj_2.3_fullcolumn_withz.nc')
# all_trajs = {'rf06': [1.6, 2.0, 2.3, 2.6, 3.0],
# 'rf10': [5.5, 6.0]}
# for flight, traj_list in all_trajs.items():
# for traj in traj_list:
# name = os.path.join(utils.trajectory_netcdf_dir, "{}_MODIS_traj_{:0.1f}.nc".format(flight, traj))
# print("working on {}...".format(os.path.basename(name)))
# ds = make_trajectory(rfnum=flight, trajnum=traj, save=name);
# ds = make_trajectory(rfnum='rf06', trajnum=2.3, save=False)
# save_trajectory_to_netcdf(ds, r'/home/disk/eos4/jkcm/Data/CSET/Lagrangian_project/trajectory_files/rf06_MODIS_traj_2.3.nc')
# ds = make_trajectory(rfnum='rf10', trajnum=6.0, save=False)
# save_trajectory_to_netcdf(ds, r'/home/disk/eos4/jkcm/Data/CSET/Lagrangian_project/trajectory_files/rf10_MODIS_traj_6.0.nc')
``` |
{
"source": "jkcm/mesoscale-morphology",
"score": 2
} |
#### File: mesoscale-morphology/data_processing/MERRA_subset.py
```python
from Lagrangian_CSET import met_utils as mu
import numpy as np
import glob
import xarray as xr
import time
for year in [2014, 2015, 2016]:
for month in np.arange(1,13):
#MERRA_data = xr.open_mfdataset(sorted(glob.glob(r'/home/disk/eos4/jkcm/Data/CSET/MERRA/unified_2/*.unified*.nc4')), combine='by_coords')
# MERRA2_data = xr.open_mfdataset(sorted(glob.glob(r'/home/disk/eos4/jkcm/Data/MERRA/pressure/*.inst3_3d_asm_Np*.nc')), combine='by_coords')
# MERRA2_data = xr.open_dataset(r'/home/disk/eos4/jkcm/Data/MERRA/measures/MERRA2_400.inst3_3d_asm_Np.SEP.nc')
MERRA2_data = xr.open_mfdataset(f'/home/disk/eos4/jkcm/Data/MERRA/measures/lev/MERRA2_400.inst3_3d_asm_Np.{str(year)}{month:02}*.SUB.nc', combine='by_coords')
MERRA2_data['lon'] = MERRA2_data.lon%360
# MERRA2_csp = xr.open_dataset(r'/home/disk/eos4/jkcm/Data/MERRA/measures/MERRA2_400.tavg1_2d_csp_Nx.SEP.nc')
MERRA2_csp = xr.open_mfdataset(f'/home/disk/eos4/jkcm/Data/MERRA/measures/cloud/MERRA2_400.tavg1_2d_csp_Nx.{str(year)}{month:02}*.nc4.nc4', combine='by_coords')
MERRA2_csp['lon'] = MERRA2_csp.lon%360
# MERRA2_sfc = xr.open_mfdataset(sorted(glob.glob(r'/home/disk/eos4/jkcm/Data/MERRA/sfc/*.inst*.nc4')), combine='by_coords')
# MERRA2_sfc = xr.open_dataset(r'/home/disk/eos4/jkcm/Data/MERRA/measures/MERRA2_400.instU_2d_asm_Nx.SEP.nc')
MERRA2_sfc = xr.open_mfdataset(f'/home/disk/eos4/jkcm/Data/MERRA/measures/sfc/MERRA2_400.inst1_2d_asm_Nx.{str(year)}{month:02}*.nc4.nc4', combine='by_coords')
MERRA2_sfc['lon'] = MERRA2_sfc.lon%360
#add some sfc stuffs
time_arr = [True if i in MERRA2_data.time.values else False for i in MERRA2_sfc.time.values]
MERRA2_sfc_subs = MERRA2_sfc.isel(time=time_arr)
MERRA2_data['SST'] = MERRA2_sfc_subs['TS']
MERRA2_data['U10M'] = MERRA2_sfc_subs['U10M']
MERRA2_data['V10M'] = MERRA2_sfc_subs['V10M']
MERRA2_data['T2M'] = MERRA2_sfc_subs['T2M']
MERRA2_data['TQL'] = MERRA2_sfc_subs['TQL']
MERRA2_data['TQV'] = MERRA2_sfc_subs['TQV']
wspd = np.sqrt(MERRA2_sfc_subs['U10M'].values**2 + MERRA2_sfc_subs['V10M'].values**2)
MERRA2_sfc_subs['WSPD_10M'] = (('time', 'lat', 'lon'), wspd)
MERRA2_sfc_subs['WSPD_10M'] = MERRA2_sfc_subs['WSPD_10M'].assign_attrs(
{"long_name": "wind speed 10m",
"units": "m s**-1"})
MERRA2_data['WSPD_10M'] = MERRA2_sfc_subs['WSPD_10M']
#add some COSP stuffs
time_arr = np.full(MERRA2_csp.time.values.shape, False)
for i, t in enumerate(MERRA2_data.time.values):
time_arr[np.argmin(np.abs(MERRA2_csp.time.values-t))] = True
MERRA2_csp_subs = MERRA2_csp.isel(time=time_arr)
MERRA2_csp_subs = MERRA2_csp_subs.assign_coords(time=MERRA2_data.time)
MERRA2_data['ISCCPALB'] = MERRA2_csp_subs['ISCCPALB']
MERRA2_data['MDSCLDFRCLO'] = MERRA2_csp_subs['MDSCLDFRCLO']
MERRA2_data['MDSH2OPATH'] = MERRA2_csp_subs['MDSH2OPATH']
MERRA2_data['MDSCLDSZH20'] = MERRA2_csp_subs['MDSCLDSZH20']
MERRA2_data['MDSOPTHCKH2O'] = MERRA2_csp_subs['MDSOPTHCKH2O']
#some spare levs
MERRA2_data["RH_700"] = MERRA2_data.RH.sel(lev=700)
MERRA2_data["T_700"] = MERRA2_data.T.sel(lev=700)
# #ADD SOME MORE MERRA VARS
t_1000 = MERRA2_data.T.sel(lev=1000)
theta_700 = mu.theta_from_p_T(p=700, T=MERRA2_data.T.sel(lev=700))
LTS = theta_700-t_1000
t_dew = t_1000-(100-100*MERRA2_data.RH.sel(lev=1000))/5
lcl = mu.get_LCL(t=t_1000, t_dew=t_dew, z=MERRA2_data.H.sel(lev=1000))
z_700 = MERRA2_data.H.sel(lev=700)
gamma_850 = mu.get_moist_adiabatic_lapse_rate(MERRA2_data.T.sel(lev=850), 850)
EIS = LTS - gamma_850*(z_700-lcl)
MERRA2_data['LTS'] = LTS
MERRA2_data['EIS'] = EIS
t_v = mu.tvir_from_T_w(MERRA2_data.T, MERRA2_data.QV)
rho = mu.density_from_p_Tv(MERRA2_data.lev*100, t_v)
MERRA2_data['dzdt'] = -MERRA2_data['OMEGA']/(9.81*rho)
MERRA2_data['div_700'] = MERRA2_data['dzdt'].sel(lev=700)/MERRA2_data['H'].sel(lev=700)
theta_sst = mu.theta_from_p_T(p=MERRA2_data.PS/100, T=MERRA2_data.SST)
theta_800 = mu.theta_from_p_T(p=800, T=MERRA2_data.T.sel(lev=800))
MERRA2_data['M'] = theta_sst - theta_800
[_, dtdi, dtdj] = np.gradient(MERRA2_data.SST)
#di is dlat, dj is dlon
lat_spaces = np.diff(MERRA2_data.coords['lat'].values)
lon_spaces = np.diff(MERRA2_data.coords['lon'].values)
assert(np.allclose(lat_spaces, 0.5, atol=0.01) and np.allclose(lon_spaces, 0.625, atol=0.05))
# PREVIOUSLY HAD NEGATIVE LAT_SPACES
dlondj = np.mean(lon_spaces)
dlatdi = np.mean(lat_spaces)
def get_dlondx(lat) : return(360/(np.cos(np.deg2rad(lat))*4.0075017e7))
dlondx = get_dlondx(MERRA2_data.coords['lat'].values)
dlatdy = 360/4.000786e7 # degrees lat per meter y
dtdx = dtdj/dlondj*dlondx[None,:,None]
dtdy = dtdi/dlatdi*dlatdy
T_adv = -(MERRA2_data.U10M.values*dtdx + MERRA2_data.V10M.values*dtdy)
MERRA2_data['T_adv'] = (('time', 'lat', 'lon'), T_adv, {'units': "K s**-1",
'long_name': "temperature_advection"})
#ADDING SOME MERRA DIVERGENCE STUFFS
dudi = np.gradient(MERRA2_data.U10M)[2]
dvdj = np.gradient(MERRA2_data.V10M)[1]
dlatdy = 360/4.000786e7 # degrees lat per meter y
def get_dlondx(lat) : return(360/(np.cos(np.deg2rad(lat))*4.0075017e7))
dlondx = get_dlondx(MERRA2_data.coords['lat'].values)
lat_spaces = np.diff(MERRA2_data.coords['lat'].values)
lon_spaces = np.diff(MERRA2_data.coords['lon'].values)
assert(np.allclose(lat_spaces, 0.5, atol=0.01) and np.allclose(lon_spaces, 0.625, atol=0.05))
dlondi = np.mean(lon_spaces)
dlatdj = np.mean(lat_spaces)
dudx = dudi/dlondi*dlondx[None, :, None]
dvdy = dvdj/dlatdj*dlatdy
div = dudx + dvdy
MERRA2_data['sfc_div'] = (('time', 'lat', 'lon'), div)
MERRA2_data['sfc_div'] = MERRA2_data['sfc_div'].assign_attrs(
{"long_name": "wind divergence 10m",
"units": "s**-1"})
MERRA_subset = MERRA2_data[['H', 'PS', 'SST', 'U10M', 'V10M', 'LTS', 'EIS', 'dzdt', 'sfc_div', 'RH_700', 'T_700', 'WSPD_10M', 'div_700',
'TQV', 'TQL', 'T2M', 'M', 'T_adv',
'ISCCPALB', 'MDSCLDFRCLO', 'MDSH2OPATH', 'MDSCLDSZH20', 'MDSOPTHCKH2O']]
for key, var in MERRA_subset.data_vars.items():
print(key)
print(var.dtype)
comp = dict(zlib=True, complevel=2)
MERRA_subset.to_netcdf(f'/home/disk/eos4/jkcm/Data/MERRA/measures/subset/2/MERRA2.unified_subset.{str(year)}{month:02}.nc', engine='h5netcdf', encoding={var: comp for var in MERRA_subset.data_vars})
```
#### File: mesoscale-morphology/data_processing/regrid_rain_rate.py
```python
import sys
import os
import glob
import numpy as np
import re
import xarray as xr
import datetime as dt
from itertools import product
from multiprocessing import Pool, cpu_count, current_process
from classified_cset.utils import LoopTimer
class Groupby:
# note: adapted from https://github.com/esantorella/hdfe. MIT license required upon publication
def __init__(self, keys):
self.keys, self.keys_as_int = np.unique(keys, return_inverse = True)
self.n_keys = max(self.keys_as_int) + 1
self.set_indices()
def set_indices(self):
self.indices = [[] for i in range(self.n_keys)]
for i, k in enumerate(self.keys_as_int):
self.indices[k].append(i)
self.indices = [np.array(elt) for elt in self.indices]
def apply(self, function, vector, broadcast=False):
if broadcast:
result = np.zeros(len(vector))
for idx in self.indices:
result[idx] = function(vector[idx])
else:
result = np.zeros(self.n_keys)
for k, idx in enumerate(self.indices):
result[k] = function(vector[idx])
return result
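# Minimal usage sketch for Groupby (illustrative values only): observations are
# grouped by key and a reduction is applied per group.
# >>> g = Groupby(np.array([10, 20, 10]))
# >>> g.apply(np.mean, np.array([1.0, 2.0, 3.0]))
# array([2., 2.])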
def read_file(f):
data = xr.open_dataset(f)
year = data.time_vars.isel(yr_day_utc=0).values
day = data.time_vars.isel(yr_day_utc=1).values
utc = data.time_vars.isel(yr_day_utc=2).values
total_secs = (utc*3600)
secs = total_secs//1
msecs = 1000*(total_secs % 1)
dtime = np.datetime64(f'{np.median(year):0.0f}-01-01')+np.timedelta64(1, 'D')*(day-1)+np.timedelta64(1, 's')*(secs)+np.timedelta64(1, 'ms')*(msecs)
data['datetime']= (data.longitude.dims, np.broadcast_to(dtime, data.longitude.shape))
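# time_vars holds (year, day-of-year, UTC decimal hours); the lines above unpack it
# into whole seconds and milliseconds and store a per-pixel 'datetime' variable
# broadcast to the swath shape.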
data = data.drop(labels=['time_vars'])
data['longitude'] = data['longitude']%360
return data
def load_test_data():
testfile = '/home/disk/eos5/rmeast/rain_rates_89/2015/AMSR2_89GHz_pcp_est_2015_206_day.nc'
data = read_file(testfile)
return data
def make_gridded_dataset(data, res=0.25):
"""
Grid L2 AMSR precipitation retrievals onto a regular lat/lon grid.
Each observation is snapped to the nearest point of a regular grid at resolution
`res` (degrees), the observations are grouped by grid box, and summary statistics
of the distribution within each box are computed. Statistics are returned as an
xarray Dataset.
"""
def round_nearest(arr, res):
nans = np.isnan(arr)
ret = (((arr+res/2)/res)//1)*res
ret[nans] = np.nan
return ret
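# e.g. (illustrative) round_nearest(np.array([10.13, -0.2]), 0.25)
# -> array([10.25, -0.25]); NaNs are preserved.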
def reshape_incomplete_array(complete_idx, incomplete_idx, vals, shape):
new_vals = np.full_like(complete_idx, fill_value=np.nan)
for idx, val in zip(incomplete_idx, vals):
new_vals[idx] = val
return new_vals.reshape(shape)
rain_stats_dict = {0: {'name': 'rain_prob',
'long_name': 'Probability of Rain',
'standard_name': 'rain_probability',
'units': '0-1'},
1: {'name': 'rain_rate',
'long_name': 'Rain Rate',
'standard_name': 'rain_rate',
'units': 'mm hr^-1'},
2: {'name': 'rain_rwr',
'long_name': 'Rain Rate While Raining',
'standard_name': 'conditional_rain_rate',
'units': 'mm hr^-1'},
3: {'name': 'rain_max',
'long_name': 'Max Rain Rate',
'standard_name': 'max_rain_rate',
'units': 'mm hr^-1'}}
func_dict = {'mean': np.nanmean,
'median': np.nanmedian,
'25_pctile': lambda x: np.nanpercentile(x, 25),
'75_pctile': lambda x: np.nanpercentile(x, 75),
'min': np.nanmin,
'max': np.nanmax}
if not 1/res == int(1/res):
raise ValueError("I haven't gone through to test whether this will work for any resolution that's not a unit fraction.")
#setting up new grid and gridbox index
grid_lats = np.arange(-90, 90, res)
grid_lons = np.arange(0, 360, res)
grid_coords = np.array(list(product(grid_lats, grid_lons)))
full_grid_lats = grid_coords[:,0]
full_grid_lons = grid_coords[:,1]
grid_coords_lats_idx = (full_grid_lats+90)/res
grid_coords_lons_idx = full_grid_lons/res
grid_combined_idx = (360/res)*grid_coords_lats_idx + grid_coords_lons_idx
assert(len(np.unique(grid_combined_idx)) == len(grid_combined_idx))
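# The combined index is a row-major index into the (lat, lon) grid:
# idx = (360/res) * lat_index + lon_index, so each grid box maps to a unique
# integer key that Groupby can use below.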
#setting up old data unique index
old_lats = data.latitude.values.flatten()
old_lons = data.longitude.values.flatten()
good_filt = np.logical_and(~np.isnan(old_lats), ~np.isnan(old_lons))
old_lats, old_lons = old_lats[good_filt], old_lons[good_filt]
lats_regrid = round_nearest(old_lats, res)
lons_regrid = round_nearest(old_lons, res)%360
lats_regrid_idx = (lats_regrid+90)/res
lons_regrid_idx = lons_regrid/res
unique_combined_idx = (360/res)*lats_regrid_idx + lons_regrid_idx
assert(set(unique_combined_idx).issubset(grid_combined_idx))
#grouping old data by box
grouped = Groupby(unique_combined_idx.astype(int))
def new_reshape(vals):
"""Reshapes value from groupby operation to an unfilled lat/lon grid"""
return reshape_incomplete_array(grid_combined_idx, grouped.keys, vals, shape=(len(grid_lats), len(grid_lons)))
ds = xr.Dataset()
ds['latitude'] = grid_lats
ds['longitude'] = grid_lons
ds.attrs['comments'] = "gridded netcdf created by <EMAIL>, adapted from R Eastman AMSR 89 GHz retrievals. " +\
"https://doi.org/10.1175/JTECH-D-18-0185.1"
ds.attrs['creation date'] = str(dt.datetime.utcnow())
ds.attrs['resolution'] = f'{str(res)} deg'
ds['obs_count'] = (('latitude', 'longitude'), new_reshape(grouped.apply(len, np.empty_like(unique_combined_idx))))
ds['not_nan_count'] = (('latitude', 'longitude'), new_reshape(grouped.apply(
lambda x: sum(~np.isnan(x)), np.empty_like(unique_combined_idx))))
ds['time'] = (('latitude', 'longitude'), new_reshape(grouped.apply(
lambda x: np.nanmean(x.astype('int64')).astype('datetime64[ns]'), data['datetime'].values.flatten()[good_filt])))
for k, v in rain_stats_dict.items():
print('working on '+v['name'])
sys.stdout.flush()
old_data = data.rain_stats.isel(prob_rate_rwr_max=k).values.flatten()[good_filt]
for func_name, func in func_dict.items():
new_vals = new_reshape(grouped.apply(func, old_data))
new_dict = {'long_name': f"{v['long_name']}_{func_name}",
'standard_name': f"{v['standard_name']}_{func_name}",
'units': v['units']}
ds[f"{v['name']}_{func_name}"] = (('latitude', 'longitude'), new_vals, new_dict)
# print(f"{v['name']}_{func_name}")
sys.stdout.flush()
print('finishing one')
sys.stdout.flush()
return ds
def process_file(f):
print(os.path.basename(f))
date = dt.datetime.strptime(os.path.basename(f)[20:28], '%Y_%j')
data = read_file(f)
ds = make_gridded_dataset(data, res=0.25)
ds = ds.expand_dims({'date': [date]})
save_name = os.path.join(r'/home/disk/eos9/jkcm/Data/rain/2016', os.path.basename(f)[:-3]+'_gridded.nc')
print(f'saving {save_name}...')
sys.stdout.flush()
comp = dict(zlib=True, complevel=2)
ds.to_netcdf(save_name, engine='h5netcdf', encoding={var: comp for var in ds.data_vars})
if __name__ == "__main__":
files_2014 = glob.glob('/home/disk/eos5/rmeast/rain_rates_89/2014/AMSR2_89GHz_pcp_est_2014_*_day.nc')
files_2015 = glob.glob('/home/disk/eos5/rmeast/rain_rates_89/2015/AMSR2_89GHz_pcp_est_2015_*_day.nc')
files_2016 = glob.glob('/home/disk/eos5/rmeast/rain_rates_89/2016/AMSR2_89GHz_pcp_est_2016_*_day.nc')
# done_2014 = [os.path.basename(i)[:-11] for i in glob.glob('/home/disk/eos9/jkcm/Data/rain/2014/AMSR2_89*.nc')]
# done_2015 = [os.path.basename(i)[:-11] for i in glob.glob('/home/disk/eos9/jkcm/Data/rain/2015/AMSR2_89*.nc')]
# not_done_2014 = [i for i in files_2014 if os.path.basename(i)[:-3] not in done_2014]
# not_done_2015 = [i for i in files_2015 if os.path.basename(i)[:-3] not in done_2015]
with Pool(16) as p:
p.map(process_file, files_2016)
#doing 2015
``` |
{
"source": "jkcm/uw-trace",
"score": 2
} |
#### File: uwtrajectory/ERA5/add_to_trajectory.py
```python
import numpy as np
import xarray as xr
import os
from .. import utils, met_utils, config
from ..LoopTimer import LoopTimer
def add_ERA_ens_to_trajectory(ds, box_degrees=2):
lats, lons, times = ds.lat.values, ds.lon.values, ds.time.values
space_index = int(np.round(box_degrees/0.25/2)) # go up/down/left/right this many pixels
unique_days = set([utils.as_datetime(i).date() for i in times])
ens_files = [os.path.join(config.ERA_ens_source, config.ERA_ens_fmt.format(i)) for i in unique_days]
with xr.open_mfdataset(sorted(ens_files), combine='by_coords') as data:
# data = data.rename({'level': 'ens_level'})
ds.coords['number'] = data.coords['number']
# ds.coords['ens_level'] = data.coords['ens_level']
if 'w' in data.data_vars.keys() and 'sp' in data.data_vars.keys():
data['dspdt'] = (data.sp.dims, np.gradient(data.sp, np.median(np.gradient(data.time.values)/np.timedelta64(1, 's')), axis=0),
{'units': "Pa s**-1", 'long_name': "Surface pressure tendency", 'standard_name': 'tendency_of_surface_air_pressure'})
data['w_corr'] = (data.w.dims, data.w - data.dspdt, {'units': data.w.units, 'long_name': 'Vertical velocity (sp-corrected)'})
for var in data.data_vars.keys():
var_shape = data[var].isel(time=0, latitude=0, longitude=0).shape
vals = []
for (lat, lon, time) in zip(lats, lons, times):
if lat > np.max(data.coords['latitude']) or lat < np.min(data.coords['latitude']) or \
lon > np.max(data.coords['longitude']) or lon < np.min(data.coords['longitude']):
print(f'out of range of data: {lat}, {lon}, {time}')
vals.append(np.full(var_shape, float('nan'), dtype='float'))
continue
x = data[var].sel(longitude=slice(lon - box_degrees/2, lon + box_degrees/2),
latitude=slice(lat + box_degrees/2, lat - box_degrees/2))
z = x.sel(method='nearest', time=time, tolerance=np.timedelta64(1, 'h'))
#this applies a 2D gaussian the width of z, i.e. sigma=box_degrees
gauss_shape = tuple([v for v,i in zip(z.shape,z.dims) if i in ['latitude', 'longitude'] ])
gauss = utils.gauss2D(shape=gauss_shape, sigma=gauss_shape[-1])
filtered = z * gauss
vals.append(filtered.sum(dim=('latitude', 'longitude')).values)
ds['ERA_ens_'+var] = (tuple(x for x in data[var].dims if x not in ['latitude', 'longitude']), np.array(vals), data[var].attrs)
# print('adding ensemble temperatures...')
# ens_temp_files = [os.path.join(utils.ERA_ens_temp_source, i) for i in sorted(os.listdir(utils.ERA_ens_temp_source))]
# with xr.open_mfdataset(sorted(ens_temp_files), combine='by_coords') as data:
# data = data.rename({'level': 'ens_level'})
# #ds.coords['number'] = data.coords['number']
# #ds.coords['ens_level'] = data.coords['ens_level']
# for var in data.data_vars.keys():
# var_shape = data[var].isel(time=0, latitude=0, longitude=0).shape
# vals = []
# for (lat, lon, time) in zip(lats, lons, times):
# if lat > np.max(data.coords['latitude']) or lat < np.min(data.coords['latitude']) or \
# lon > np.max(data.coords['longitude']) or lon < np.min(data.coords['longitude']):
# print('out of range of data')
# print(lat, lon, time)
# print(np.max(data.coords['latitude'].values), np.min(data.coords['latitude'].values))
# print(np.max(data.coords['longitude'].values), np.min(data.coords['longitude'].values))
# vals.append(np.full(var_shape, float('nan'), dtype='float'))
# continue
# x = data[var].sel(longitude=slice(lon - box_degrees/2, lon + box_degrees/2),
# latitude=slice(lat + box_degrees/2, lat - box_degrees/2))
# z = x.sel(method='nearest', time=time, tolerance=np.timedelta64(1, 'h'))
# #this applies a 2D gaussian the width of z, i.e. sigma=box_degrees
# gauss_shape = tuple([v for v,i in zip(z.shape,z.dims) if i in ['latitude', 'longitude'] ])
# gauss = utils.gauss2D(shape=gauss_shape, sigma=gauss_shape[-1])
# filtered = z * gauss
# vals.append(filtered.sum(dim=('latitude', 'longitude')).values)
# ds['ERA_ens_'+var] = (tuple(x for x in data[var].dims if x not in ['latitude', 'longitude']), np.array(vals), data[var].attrs)
return ds
def add_ERA_to_trajectory(ds, box_degrees=2):
"""Retrieve ERA5 data in a box around a trajectory
Assumes ERA5 data is on a 0.25 x 0.25 degree grid
Returns the trajectory Dataset with ERA_* variables added
"""
lats, lons, times = ds.lat.values, ds.lon.values, ds.time.values
space_index = int(np.round(box_degrees/0.3/2)) # go up/down/left/right this many pixels
unique_days = set([utils.as_datetime(i).date() for i in times])
files = [os.path.join(config.ERA_source, config.ERA_fmt.format(i))
for i in unique_days]
# flux_files = [os.path.join(utils.ERA_source, "ERA5.flux.NEP.{:%Y-%m-%d}.nc".format(i))
# for i in unique_days]
with xr.open_mfdataset(sorted(files), combine='by_coords') as full_dataset:
data = full_dataset.sel(latitude=slice(np.max(lats)+box_degrees, np.min(lats)-box_degrees), longitude=slice(np.min(lons)-box_degrees, np.max(lons)+box_degrees))
print('prepping data...')
ds.coords['level'] = data.coords['level']
#adding in q:
T = data['t'].values
RH = data['r'].values
p = np.broadcast_to(data.coords['level'].values[None, :, None, None], T.shape)*100
q = utils.qv_from_p_T_RH(p, T, RH)
data['q'] = (('time', 'level', 'latitude', 'longitude'), q)
data['q'] = data['q'].assign_attrs({'units': "kg kg**-1",
'long_name': "specific_humidity",
'dependencies': 'ERA_t, ERA_p, ERA_r'})
MR = q/(1-q)
data['MR'] = (('time', 'level', 'latitude', 'longitude'), MR)
data['MR'] = data['MR'].assign_attrs({'units': "kg kg**-1",
'long_name': "mixing_ratio",
'dependencies': 'ERA_t, ERA_p, ERA_r'})
# adding gradients in for z, t, and q. Assuming constant grid spacing.
for var in ['t', 'q', 'z', 'u', 'v', 'MR']:
print(f'creating gradient in {var}...', end="\r")
[_,_,dvardj, dvardi] = np.gradient(data[var].values)
dlatdy = 360/4.000786e7 # degrees lat per meter y
def get_dlondx(lat) : return(360/(np.cos(np.deg2rad(lat))*4.0075017e7))
lat_spaces = np.diff(data.coords['latitude'].values)
lon_spaces = np.diff(data.coords['longitude'].values)
try:
assert(np.allclose(lat_spaces, -0.25, atol=0.01) and np.allclose(lon_spaces, 0.25, atol=0.05))
except AssertionError as e:
print(np.unique(lat_spaces))
print(np.unique(lon_spaces))
raise e
dlondi = np.mean(lon_spaces)
dlatdj = np.mean(lat_spaces)
dlondx = get_dlondx(data.coords['latitude'].values)
dvardx = dvardi/dlondi*dlondx[None,None,:,None]
dvardy = dvardj/dlatdj*dlatdy
data['d{}dx'.format(var)] = (('time', 'level', 'latitude', 'longitude'), dvardx)
data['d{}dy'.format(var)] = (('time', 'level', 'latitude', 'longitude'), dvardy)
grad_attrs = {'q': {'units': "kg kg**-1 m**-1",
'long_name': "{}_gradient_of_specific_humidity",
'dependencies': "ERA_t, ERA_p, ERA_r"},
't': {'units': "K m**-1",
'long_name': "{}_gradient_of_temperature",
'dependencies': "ERA_t"},
'z': {'units': "m**2 s**-2 m**-1",
'long_name': "{}_gradient_of_geopotential",
'dependencies': "ERA_z"},
'u': {'units': "m s**-1 m**-1",
'long_name': "{}_gradient_of_zonal_wind",
'dependencies': "ERA_u"},
'v': {'units': "m s**-1 m**-1",
'long_name': "{}_gradient_of_meridional_wind",
'dependencies': "ERA_v"},
'MR': {'units': "kg kg**-1 m**-1",
'long_name': "{}_gradient_of_mixing_ratio",
'dependencies': "ERA_t, ERA_p, ERA_r"}}
for key, val in grad_attrs.items():
for (n, drn) in [('x', 'eastward'), ('y', 'northward')]:
attrs = val.copy()
var = 'd{}d{}'.format(key, n)
attrs['long_name'] = attrs['long_name'].format(drn)
data[var] = data[var].assign_attrs(attrs)
print('\ndone')
for var in data.data_vars.keys():
vals = []
print(f'working on {var}...')
lt = LoopTimer(len(lats))
for i, (lat, lon, time) in enumerate(zip(lats, lons, times)):
lt.update()
if lat > np.max(data.coords['latitude']) or lat < np.min(data.coords['latitude']) or \
lon > np.max(data.coords['longitude']) or lon < np.min(data.coords['longitude']):
print(f'out of range of data: {lat}, {lon}, {time}')
print(np.max(data.coords['latitude']), np.min(data.coords['latitude']), np.max(data.coords['longitude']) , np.min(data.coords['longitude']))
vals.append(np.full_like(data.coords['level'], float('nan'), dtype='float'))
continue
x = data[var].sel(longitude=slice(lon - box_degrees/2, lon + box_degrees/2),
latitude=slice(lat + box_degrees/2, lat - box_degrees/2))
z = x.sel(method='nearest', tolerance=np.timedelta64(1, 'h'), time=time)
#this applies a 2D gaussian the width of z, i.e. sigma=box_degrees
gauss = utils.gauss2D(shape=z.shape[1:], sigma=z.shape[0])
filtered = z.values * gauss
vals.append(np.sum(filtered, axis=(1,2)))
print('')
ds['ERA_'+var] = (('time', 'level'), np.array(vals))
ds['ERA_'+var] = ds['ERA_'+var].assign_attrs(data[var].attrs)
t_1000 = ds.ERA_t.sel(level=1000).values
theta_700 = met_utils.theta_from_p_T(p=700, T=ds.ERA_t.sel(level=700).values)
LTS = theta_700-t_1000
ds['ERA_LTS'] = (('time'), np.array(LTS))
ds['ERA_LTS'] = ds['ERA_LTS'].assign_attrs(
{"long_name": "Lower tropospheric stability",
"units": "K",
"_FillValue": "NaN"})
t_dew = t_1000-(100-ds.ERA_r.sel(level=1000).values)/5
lcl = met_utils.calculate_LCL(t=t_1000, t_dew=t_dew, z=ds.ERA_z.sel(level=1000).values/9.81)
z_700 = ds.ERA_z.sel(level=700).values/9.81
gamma_850 = met_utils.get_moist_adiabatic_lapse_rate(ds.ERA_t.sel(level=850).values, 850)
eis = LTS - gamma_850*(z_700-lcl)
ds['ERA_EIS'] = (('time'), np.array(eis))
ds['ERA_EIS'] = ds['ERA_EIS'].assign_attrs(
{"long_name": "Estimated inversion strength",
"units": "K",
"_FillValue": "NaN"})
# with xr.open_mfdataset(sorted(flux_files), combine='by_coords') as flux_data:
# for var in flux_data.data_vars.keys():
# vals = []
# for (lat, lon, time) in zip(lats, lons%360, times):
# if lat > np.max(flux_data.coords['latitude']) or lat < np.min(flux_data.coords['latitude']) or \
# lon > np.max(flux_data.coords['longitude']) or lon < np.min(flux_data.coords['longitude']):
# print(f'out of range of data" {lat}, {lon}, {time}')
# vals.append(float('nan'))
# continue
# x = flux_data[var].sel(longitude=slice(lon - box_degrees/2, lon + box_degrees/2),
# latitude=slice(lat + box_degrees/2, lat - box_degrees/2))
# z = x.sel(method='nearest', time=time, tolerance=np.timedelta64(1, 'h'))
# gauss = utils.gauss2D(shape=z.shape, sigma=z.shape[0])
# filtered = z.values * gauss
# vals.append(np.sum(filtered))
# ds['ERA_'+var] = (('time'), np.array(vals))
# ds['ERA_'+var] = ds['ERA_'+var].assign_attrs(flux_data[var].attrs)
ds.attrs['ERA_params'] = f'ERA5 data acquired from ECMWF Copernicus at cds.climate.copernicus.eu/. statistics computed over a {box_degrees}-deg average centered on trajectory. EIS and LTS computed according to Wood and Bretherton (2006) and Klein and Hartmann (1993) respectively.'
ds.attrs['ERA_reference'] = 'Copernicus Climate Change Service (C3S) (2017): ERA5: Fifth generation of ECMWF atmospheric reanalyses of the global climate . Copernicus Climate Change Service Climate Data Store (CDS), date of access. https://cds.climate.copernicus.eu/cdsapp#!/home'
return ds
def add_ERA_sfc_to_trajectory(ds, box_degrees=2):
lats, lons, times = ds.lat.values, ds.lon.values, ds.time.values
space_index = int(np.round(box_degrees/0.3/2)) # go up/down/left/right this many pixels
unique_days = set([utils.as_datetime(i).date() for i in times])
sfc_files = [os.path.join(config.ERA_source, config.ERA_sfc_fmt.format(i))
for i in unique_days]
with xr.open_mfdataset(sorted(sfc_files), combine='by_coords') as data:
for var in data.data_vars.keys():
vals = []
for (lat, lon, time) in zip(lats, lons, times):
if lat > np.max(data.coords['latitude']) or lat < np.min(data.coords['latitude']) or \
lon > np.max(data.coords['longitude']) or lon < np.min(data.coords['longitude']):
print('out of range of data')
print(lat, lon, time)
vals.append(float('nan'))
continue
x = data[var].sel(longitude=slice(lon - box_degrees/2, lon + box_degrees/2),
latitude=slice(lat + box_degrees/2, lat - box_degrees/2))
z = x.sel(method='nearest', tolerance=np.timedelta64(59, 'm'), time=time)
gauss = utils.gauss2D(shape=z.shape, sigma=z.shape[0])
filtered = z.values * gauss
vals.append(np.sum(filtered))
ds['ERA_'+var] = (('time'), np.array(vals))
ds['ERA_'+var] = ds['ERA_'+var].assign_attrs(data[var].attrs)
return ds
def add_advection_to_trajectory(ds):
"""Add advection to trajectory after adding ERA data
TODO make data dependencies explicit
"""
names = dict(u='ERA_u', v='ERA_v', u_t='traj_u', v_t='traj_v',
dtdx='ERA_dtdx', dtdy='ERA_dtdy', dqdx='ERA_dqdx', dqdy='ERA_dqdy', dMRdx='ERA_dMRdx', dMRdy='ERA_dMRdy')
assert np.all([i in ds.data_vars.keys() for i in names.values()])
rel_adv_of_T = -((ds[names['u']].values-ds[names['u_t']].values[:, None])*ds[names['dtdx']].values + \
(ds[names['v']].values-ds[names['v_t']].values[:, None])*ds[names['dtdy']].values)
rel_adv_of_q = -((ds[names['u']].values-ds[names['u_t']].values[:, None])*ds[names['dqdx']].values + \
(ds[names['v']].values-ds[names['v_t']].values[:, None])*ds[names['dqdy']].values)
rel_adv_of_MR = -((ds[names['u']].values-ds[names['u_t']].values[:, None])*ds[names['dMRdx']].values + \
(ds[names['v']].values-ds[names['v_t']].values[:, None])*ds[names['dMRdy']].values)
T_adv_attr = {'units': "K s**-1",
'long_name': "trajectory_relative_advection_of_temperature",
'dependencies': 'ERA_t, traj_u, traj_v, ERA_u, ERA_v'}
q_adv_attr = {'units': "kg kg**-1 s**-1",
'long_name': "trajectory_relative_advection_of_specific_humidity",
'dependencies': 'ERA_q, traj_u, traj_v, ERA_u, ERA_v'}
MR_adv_attr = {'units': "kg kg**-1 s**-1",
'long_name': "trajectory_relative_advection_of_mixing ratio",
'dependencies': 'ERA_q, traj_u, traj_v, ERA_u, ERA_v'}
ds['ERA_T_adv'] = (('time', 'level'), rel_adv_of_T)
ds['ERA_T_adv'] = ds['ERA_T_adv'].assign_attrs(**T_adv_attr)
ds['ERA_q_adv'] = (('time', 'level'), rel_adv_of_q)
ds['ERA_q_adv'] = ds['ERA_q_adv'].assign_attrs(**q_adv_attr)
ds['ERA_MR_adv'] = (('time', 'level'), rel_adv_of_MR)
ds['ERA_MR_adv'] = ds['ERA_MR_adv'].assign_attrs(**MR_adv_attr)
return ds
```
#### File: uwtrajectory/MERRA2/add_to_trajectory.py
```python
from .. import utils, les_utils, config
import os
import xarray as xr
import numpy as np
def add_MERRA_to_trajectory(ds, box_degrees=2):
"""Add MERRA-inferred aerosol number concentrations to trajectory.
"""
lats, lons, times = ds.lat.values, ds.lon.values, utils.as_datetime(ds.time.values)
unique_days = set([utils.as_datetime(i).date() for i in times])
files = [os.path.join(config.MERRA_dir, config.MERRA_fmt.format(i)) for i in unique_days]
# if location=='nep':
# files = [os.path.join("/home/disk/eos4/jkcm/Data/MERRA/3h/", "more_vertical", "MERRA2_400.inst3_3d_aer_Nv.{:%Y%m%d}.nc4.nc4".format(i))
# for i in unique_days]
# elif location=='sea':
# files = [os.path.join("/home/disk/eos4/jkcm/Data/MERRA/sea/new/", "MERRA2_400.inst3_3d_aer_Nv.{:%Y%m%d}.SUB.nc".format(i))
# for i in unique_days]
with xr.open_mfdataset(sorted(files), combine='by_coords') as merra_data:
if np.abs(np.mean(ds.lon.values))>90: # i.e. our trajectory is closer to 180 than it is to 0 lon. force to 0-360
merra_data.coords['lon'] = merra_data['lon']%360
lons = lons%360
else:
merra_data.coords['lon'] = (merra_data['lon']+180)%360-180
lons = (lons+180)%360-180
merra_data = merra_data.sel(lat=slice(np.min(lats)-2, np.max(lats)+2), lon=slice(np.min(lons)-2, np.max(lons)+2))
# calculating MERRA pressure levels and heights
dz = merra_data.DELP/(9.81*merra_data.AIRDENS)
assert(merra_data.DELP.dims[1]=='lev')
assert(dz.dims[1]=='lev')
assert(dz.lev[-1]==72)
z = dz[:,::-1,:,:].cumsum(axis=1)[:,::-1,:,:]
z.load()
z[:, :-1, :, :] = (z.values[:, 1:, :, :]+z.values[:, :-1, :, :])/2
z[:, -1,:,:] = z[:, -1,:,:]/2
p = -merra_data.DELP[:,::-1,:,:].cumsum(axis=1)[:,::-1,:,:]
p = p + merra_data.PS
p.load()
p[:, :-1, :, :] = (p.values[:, 1:, :, :]+p.values[:, :-1, :, :])/2
p[:, -1,:,:] = (p[:, -1,:,:]+merra_data.PS)/2
merra_data['H'] = z
merra_data.H.attrs = {'long_name': 'mid_layer_heights', 'units': 'm'}
merra_data['PL'] = p
merra_data.PL.attrs = {'long_name': 'mid_level_pressure', 'units': 'Pa'}
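# The block above reconstructs mid-layer heights and pressures hydrostatically:
# dz = DELP / (g * rho) is cumulatively summed from the surface (model level 72)
# upward, and layer-edge values are averaged to mid-layer values; PL is built the
# same way from DELP and PS.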
vals_to_add = ['Na_tot', 'Na_tot_corr', 'H', 'PL', 'RH', 'AIRDENS']
na_tot = np.zeros_like(merra_data.SS001.values)
new_vals = []
for varname,params in les_utils.merra_species_dict_colarco.items():
vals_to_add.append(varname)
var = merra_data[varname]
num=les_utils.mass_to_number(mass=var, air_density=merra_data.AIRDENS.values, shape_params=params)
na_tot = na_tot+num
merra_data[varname+'_Na'] = (('time', 'lev', 'lat', 'lon'), num)
new_vals.append(varname+'_Na')
merra_data['Na_tot'] = (('time', 'lev', 'lat', 'lon'), na_tot, {'long_name': 'total aerosol number concentration, >100 nm', 'units': 'cm**-3'})
merra_data['Na_tot_corr'] = (('time', 'lev', 'lat', 'lon'), np.exp(1.24*np.log(na_tot) + 0.18), {'long_name': 'total aerosol number concentration, >100 nm, corrected to aircraft', 'units': 'cm**-3'})
merra_data['Na_tot_corr_BL_logfit'] = (('time', 'lev', 'lat', 'lon'), np.exp(0.63*np.log(na_tot) + 2.42), {'long_name': 'total aerosol number concentration, >100 nm, corrected to aircraft (boundary layer obs only)', 'units': 'cm**-3'})
ds = ds.assign_coords(lev = les_utils.MERRA_lev(merra_data.lev))
merra_data = merra_data.assign_coords(lev = les_utils.MERRA_lev(merra_data.lev))
for var in vals_to_add+new_vals:
var_shape = merra_data[var].isel(time=0, lat=0, lon=0).shape
vals = []
for (lat, lon, time) in zip(lats, lons, times):
if lat > np.max(merra_data.coords['lat']) or lat < np.min(merra_data.coords['lat']) or \
lon > np.max(merra_data.coords['lon']) or lon < np.min(merra_data.coords['lon']):
print(f'out of range of data: {lat}, {lon}, {time}')
print(merra_data.coords['lat'])
print(merra_data.coords['lon'])
raise ValueError()
continue
try:
x = merra_data[var].sel(lon=slice(lon - box_degrees/2, lon + box_degrees/2),
lat=slice(lat - box_degrees/2, lat + box_degrees/2))
except KeyError as e:
print(var)
print(lon, lat)
print(merra_data.lon)
print(merra_data.lat)
raise e
z = x.sel(method='nearest', time=time, tolerance=np.timedelta64(2, 'h'))
#this applies a 2D gaussian the width of z, i.e. sigma=box_degrees
gauss_shape = tuple([v for v,i in zip(z.shape,z.dims) if i in ['lat', 'lon'] ])
gauss = utils.gauss2D(shape=gauss_shape, sigma=gauss_shape[-1])
filtered = z * gauss
vals.append(filtered.sum(dim=('lat', 'lon')).values)
if var in vals_to_add:
attrs = merra_data[var].attrs
elif var in new_vals:
attrs = {'long_name': merra_data[var[:-3]].long_name + ', inferred aerosol number concentration', 'units':'cm**-3'}
ds['MERRA_'+var] = (tuple(x for x in merra_data[var].dims if x not in ['lat', 'lon']), np.array(vals), attrs)
ds['MERRA_Na_tot_mass'] = ds.MERRA_OCPHILIC + ds.MERRA_OCPHOBIC + ds.MERRA_BCPHILIC + \
ds.MERRA_BCPHOBIC + ds.MERRA_SO4 + ds.MERRA_DU001 + ds.MERRA_DU002 +\
ds.MERRA_DU003 +ds.MERRA_DU004 + ds.MERRA_DU005 + ds.MERRA_SS001 + \
ds.MERRA_SS002 + ds.MERRA_SS003 + ds.MERRA_SS004 + ds.MERRA_SS005
# #akn=aitken = everything below 80nm
# #acc = accumulution = everything between 80 and 1000
# #crs=coarse = everything above 1000
mass_acc_dict = {}
mass_akn_dict = {}
mass_crs_dict = {}
num_acc_dict = {}
num_akn_dict = {}
num_crs_dict = {}
for x in ['MERRA_OCPHILIC', 'MERRA_OCPHOBIC', 'MERRA_BCPHILIC', 'MERRA_BCPHOBIC', 'MERRA_SO4']:
params = les_utils.merra_species_dict_colarco[x.split('_')[1]]
data = ds[x]
rho = ds.MERRA_AIRDENS.values
n0 = les_utils.get_n0(mass=data.values, density=params['density'], r_max=50, r_min=0.001,
std_dev=params['geometric_std_dev'], mode_radius=params['mode_radius'])
mass_acc_dict[x] = les_utils.get_m_subset(density=params['density'], n0=n0, r_min=0.08, r_max=1,
std_dev=params['geometric_std_dev'], mode_radius=params['mode_radius'])
mass_akn_dict[x] = les_utils.get_m_subset(density=params['density'], n0=n0, r_min=0.01, r_max=0.08,
std_dev=params['geometric_std_dev'], mode_radius=params['mode_radius'])
mass_crs_dict[x] = les_utils.get_m_subset(density=params['density'], n0=n0, r_min=1, r_max=params['upper'],
std_dev=params['geometric_std_dev'], mode_radius=params['mode_radius'])
num_acc_dict[x] = les_utils.get_n_subset(n0, r_min=0.08, r_max=1,
std_dev=params['geometric_std_dev'], mode_radius=params['mode_radius'])
num_akn_dict[x] = les_utils.get_n_subset(n0, r_min=0.01, r_max=0.08,
std_dev=params['geometric_std_dev'], mode_radius=params['mode_radius'])
num_crs_dict[x] = les_utils.get_n_subset(n0, r_min=1, r_max=params['upper'],
std_dev=params['geometric_std_dev'], mode_radius=params['mode_radius'])
ds[x+'_n0'] = (('time', 'lev'), n0*rho)
mass_acc_attrs = {'long_name': 'accumulation mode aerosol mass',
'units': 'kg kg**-1'}
mass_akn_attrs = {'long_name': 'aitken mode aerosol mass',
'units': 'kg kg**-1'}
mass_crs_attrs = {'long_name': 'coarse mode aerosol mass',
'units': 'kg kg**-1'}
ds['MERRA_acc_mass'] = (('time', 'lev'), np.sum(list(mass_acc_dict.values()), axis=0) + \
ds.MERRA_DU001.values + ds.MERRA_SS002.values + ds.MERRA_SS003.values,
mass_acc_attrs)
ds['MERRA_akn_mass'] = (('time', 'lev'), np.sum(list(mass_akn_dict.values()), axis=0) + \
ds.MERRA_SS001.values,
mass_akn_attrs)
ds['MERRA_crs_mass'] = (('time', 'lev'), np.sum(list(mass_crs_dict.values()), axis=0) + \
ds.MERRA_DU002.values + ds.MERRA_DU003.values + ds.MERRA_DU004.values + ds.MERRA_DU005.values + \
ds.MERRA_SS004.values + ds.MERRA_SS005.values,
mass_crs_attrs)
num_acc_attrs = {'long_name': 'accumulation mode aerosol number',
'units': 'kg kg**-1'}
num_akn_attrs = {'long_name': 'aitken mode aerosol number',
'units': 'kg kg**-1'}
num_crs_attrs = {'long_name': 'coarse mode aerosol number',
'units': 'kg kg**-1'}
ds['MERRA_acc_num'] = (('time', 'lev'), np.sum(list(num_acc_dict.values()), axis=0) + \
ds.MERRA_DU001_Na.values + ds.MERRA_SS002_Na.values + ds.MERRA_SS003_Na.values,
num_acc_attrs)
ds.lev.attrs['long_name'] = 'model level pressure'
ds.lev.attrs['units'] = 'millibars'
ds.attrs['MERRA_params'] = f'MERRA-2 data primarily downloaded from NASA GMAO, and statistics computed over a {box_degrees}-deg average centered on trajectory. For aerosol estimates (Na), equivalent aerosol number is computed based on aerosol mass consistent with the MERRA2-assumed aerosol optical properties. Contact <EMAIL> for details.'
ds.attrs['MERRA_reference'] = 'MERRA-2 data available at https://gmao.gsfc.nasa.gov/reanalysis/MERRA-2.'
return ds
def new_add_MERRA_to_trajectory(ds, box_degrees=2):
ds['MERRA_Na_tot_mass'] = ds.MERRA_OCPHILIC + ds.MERRA_OCPHOBIC + ds.MERRA_BCPHILIC + ds.MERRA_BCPHOBIC + ds.MERRA_SO4 + ds.MERRA_DU001 + ds.MERRA_DU002 + ds.MERRA_DU003 +ds.MERRA_DU004 + \
ds.MERRA_DU005 + ds.MERRA_SS001 + ds.MERRA_SS002 + ds.MERRA_SS003 + ds.MERRA_SS004 + ds.MERRA_SS005
#akn=aitken = everything below 80nm
#acc = accumulution = everything between 80 and 1000
#crs=coarse = everything above 1000
mass_acc_dict = {}
mass_aik_dict = {}
mass_crs_dict = {}
num_acc_dict = {}
num_aik_dict = {}
num_crs_dict = {}
# for x in ['MERRA_OCPHILIC', 'MERRA_OCPHOBIC', 'MERRA_BCPHILIC', 'MERRA_BCPHOBIC', 'MERRA_SO4']:
# ds['MERRA_Na_acc_mass'] =
# ds['MERRA_Na_akn_mass'] =
# ds['MERRA_Na_crs_mass'] =
# ds['MERRA_akn_num'] =
# ds['MERRA_acc_num'] =
# ds['MERRA_crs_num'] =
return ds
#aitken = low-100nm
#add aitken NUMBER
#add aitken mass
```
#### File: uwtrajectory/SSMI/add_to_trajectory.py
```python
from .. import config
import numpy as np
import xarray as xr
import glob
def add_SSMI_to_trajectory(ds, box_degrees=2, hour_tolerance=0.5):
lats, lons, times = ds.lat.values, ds.lon.values, ds.time.values
cloud_vals = np.full_like(lats, fill_value=np.nan)
vapor_vals = np.full_like(lats, fill_value=np.nan)
wspd_vals = np.full_like(lats, fill_value=np.nan)
cloud_vals_std = np.full_like(lats, fill_value=np.nan)
vapor_vals_std = np.full_like(lats, fill_value=np.nan)
wspd_vals_std = np.full_like(lats, fill_value=np.nan)
count_vals = np.full_like(lats, fill_value=np.nan)
total_vals = np.full_like(lats, fill_value=np.nan)
sats = ['f15', 'f16' ,'f17', 'f18']
for sat in sats:
ssmi_data = xr.open_mfdataset(glob.glob(config.SSMI_file_fmt.format(sat)), concat_dim='time', combine='by_coords')
for i, (lat, lon, time) in enumerate(zip(lats, lons%360, times)):
for orbit_segment in [0,1]:
ds_sub = ssmi_data.sel(time=time-np.timedelta64(1-orbit_segment, 'D'), method='nearest', tolerance=np.timedelta64(24, 'h')).sel(orbit_segment=orbit_segment)
#note to future users: because of the longitude of CSET, the 1-day offset is included. This is a hardcoded value to deal with the fact that the UTC
# day of SSMI doesn't line up when you're near the antimeridian. Future use should deal with this better, by deciding whether or not to add a day
# based on the longitude being considered.
ds_sub2 = ds_sub.sel(longitude=slice(lon - box_degrees/2, lon + box_degrees/2),
latitude=slice(lat - box_degrees/2, lat + box_degrees/2))
sel_date = ds_sub2.UTCtime
nsample = 1-(np.sum(ds_sub2.nodata)/np.size(ds_sub2.nodata)).values
if nsample < 0.5:
# print('no samples')
# print(f'skipping {time}')
continue
else:
meantime = np.nanmean(ds_sub2.UTCtime.values)
sampletime = ds_sub2.time.values + np.timedelta64(int(meantime), 'h') + np.timedelta64(int(60*(meantime - int(meantime))), 'm')
miss = (time-sampletime)/np.timedelta64(1, 'h')
if np.abs(miss)<hour_tolerance:
#print(f'{sat}: found data at {time}, off by {miss} hours. sample fill is {nsample:.0%}')
# print(np.sum(~np.isnan(ds_sub2.cloud).values), np.sum(~np.isnan(ds_sub2.vapor).values), np.sum(~np.isnan(ds_sub2.wspd_mf)).values)
# print(np.size(ds_sub2.nodata.values)-np.sum(ds_sub2.nodata.values), np.sum(~ds_sub2.nodata).values)
cloud_vals[i] = np.nanmean(ds_sub2.cloud)
vapor_vals[i] = np.nanmean(ds_sub2.vapor)
wspd_vals[i] = np.nanmean(ds_sub2.wspd_mf)
count_vals[i] = np.sum(~np.isnan(ds_sub2.cloud))
total_vals[i] = np.size(ds_sub2.cloud.values)
cloud_vals_std[i] = np.nanstd(ds_sub2.cloud)
vapor_vals_std[i] = np.nanstd(ds_sub2.vapor)
wspd_vals_std[i] = np.nanstd(ds_sub2.wspd_mf)
# else:
# print('outside hour tolerance')
# break
ds['SSMI_LWP'] = (('time'), np.array(cloud_vals), ds_sub2.cloud.attrs)
ds['SSMI_LWP_std'] = (('time'), np.array(cloud_vals_std), ds_sub2.cloud.attrs)
ds['SSMI_WVP'] = (('time'), np.array(vapor_vals), ds_sub2.vapor.attrs)
ds['SSMI_WVP_std'] = (('time'), np.array(vapor_vals_std), ds_sub2.vapor.attrs)
ds['SSMI_WSPD'] = (('time'), np.array(wspd_vals), ds_sub2.wspd_mf.attrs)
ds['SSMI_WSPD_std'] = (('time'), np.array(wspd_vals_std), ds_sub2.wspd_mf.attrs)
ds['SSMI_n_samples'] = (('time'), np.array(count_vals), {'long_name': 'SSMI number of data samples'})
ds['SSMI_n_total'] = (('time'), np.array(total_vals), {'long_name': 'SSMI total number of pixels'})
# print(np.size(ds_sub2.values))
for i in ['SSMI_LWP_std', 'SSMI_WVP_std', 'SSMI_WSPD_std']:
ds[i].attrs['long_name'] = ds[i].attrs['long_name']+' standard deviation over box'
ds.attrs['SSMI_params'] = f'SSM/I data added from satellites {", ".join(sats).upper()}; statistics computed over a {box_degrees}-deg average centered on trajectory'
ds.attrs['SSMI_reference'] = f"SSM/I and SSMIS data are produced by Remote Sensing Systems. Data are available at www.remss.com/missions/ssmi."
return ds
```
#### File: uw-trace/uwtrajectory/utils.py
```python
import pytz
import os
import re
import pandas as pd
import netCDF4 as nc4
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm, rc
from ftplib import FTP
from mpl_toolkits.basemap import Basemap
from time import sleep
from urllib.request import urlopen
from urllib.error import HTTPError
import collections
import matplotlib.path as path
import glob
# import xlrd
import xarray as xr
import warnings
import pickle
import sys
from . import met_utils as mu
from scipy.interpolate import interp1d
# %% Parameters
SMALL_SIZE = 16
MEDIUM_SIZE = 20
BIGGER_SIZE = 24
rc('font', size=SMALL_SIZE) # controls default text sizes
rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
rc('axes', labelsize=SMALL_SIZE) # fontsize of the x and y labels
rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
rc('legend', fontsize=SMALL_SIZE) # legend fontsize
rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
rc('figure', dpi=100)
font = {'family' : 'DejaVu Sans',
'weight' : 'normal'}
rc('font', **font)
#cset flight case names
all_cases = {
1: {'ALC_name': 'ALC_RF02B-RF03CD',
'TLC_name': 'TLC_RF02-RF03_1.0-1.5-2.0', #opt 1.0, fine
'trajectories': [0, 1]},
2: {'ALC_name': 'ALC_RF02C-RF03AB',
'TLC_name': 'TLC_RF02-RF03_0.5-1.0', #opt 1.0, fine
'trajectories': [0, 1]},
3: {'ALC_name': 'ALC_RF04A-RF05CDE',
'TLC_name': 'TLC_RF04-RF05_2.0-2.3-2.5-3.0', #opt 2.0. check
'trajectories': [0, 1]},
4: {'ALC_name': 'ALC_RF04BC-RF05AB',
'TLC_name': 'TLC_RF04-RF05_1.0-2.0', #opt 2.0, ok
'trajectories': [0, 1]},
5: {'ALC_name': 'ALC_RF06A-RF07BCDE',
'TLC_name': 'TLC_RF06-RF07_3.5-4.0-4.3-4.6-5.0', #opt 3.0, check 3.5
'trajectories': [0, 1]},
6: {'ALC_name': 'ALC_RF06BC-RF07A',
'TLC_name': 'TLC_RF06-RF07_1.6-2.0-2.3-2.6-3.0', #opt 1.6, check
'trajectories': [0, 1]},
7: {'ALC_name': 'ALC_RF08A-RF09DEF',
'TLC_name': 'TLC_RF08-RF09_4.0-4.5-5.0',
'trajectories': [0, 1]},
8: {'ALC_name': 'ALC_RF08B-RF09BC',
'TLC_name': 'TLC_RF08-RF09_3.0-3.5',
'trajectories': [0, 1]},
9: {'ALC_name': 'ALC_RF08CD-RF09A',
'TLC_name': 'TLC_RF08-RF09_1.5-2.0',
'trajectories': [0, 1]},
10: {'ALC_name': 'ALC_RF10A-RF11DE',
'TLC_name': 'TLC_RF10-RF11_5.5-6.0', #opt 5.0, removed
'trajectories': [0, 1]},
11: {'ALC_name': 'ALC_RF10BC-RF11BC',
'TLC_name': 'TLC_RF10-RF11_3.0-3.5-4.0-5.0', #opt 5.0, fine
'trajectories': [0, 1]},
12: {'ALC_name': 'ALC_RF10D-RF11A',
'TLC_name': 'TLC_RF10-RF11_1.0-1.5', #opt 1.0, ok
'trajectories': [0, 1]},
13: {'ALC_name': 'ALC_RF12A-RF13E',
'TLC_name': 'TLC_RF12-RF13_4.5', #opt 5.0, removed
'trajectories': [0, 1]},
14: {'ALC_name': 'ALC_RF12B-RF13CD',
'TLC_name': 'TLC_RF12-RF13_3.0-3.5', #added 3.0, ok
'trajectories': [0, 1]},
15: {'ALC_name': 'ALC_RF12C-RF13B',
'TLC_name': 'TLC_RF12-RF13_2.5-3.0',
'trajectories': [0, 1]},
16: {'ALC_name': 'ALC_RF14A-RF15CDE',
'TLC_name': 'TLC_RF14-RF15_3.5-4.0',
'trajectories': [0, 1]},
17: {'ALC_name': 'ALC_RF14B-RF15B',
'TLC_name': 'TLC_RF14-RF15_3.0',
'trajectories': [0, 1]},
18: {'ALC_name': 'ALC_RF14CD-RF15A',
'TLC_name': 'TLC_RF14-RF15_1.0-2.0',
'trajectories': [0, 1]}
}
def get_lon_prime(lat, lon, lon0=-140, lat0=30):
lonp = lon0 + 0.8*(lon-lon0) + 0.4*(lat-lat0)
return lonp
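# get_lon_prime returns a sheared longitude coordinate,
# lon' = lon0 + 0.8*(lon - lon0) + 0.4*(lat - lat0); at the reference point
# (lat0, lon0) it returns lon0, e.g. get_lon_prime(30, -140) -> -140.0.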
def nan_helper(y):
"""Helper to handle indices and logical indices of NaNs.
Input:
- y, 1d numpy array with possible NaNs
Output:
- nans, logical indices of NaNs
- index, a function, with signature indices= index(logical_indices),
to convert logical indices of NaNs to 'equivalent' indices
Example:
>>> # linear interpolation of NaNs
>>> nans, x= nan_helper(y)
>>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
"""
return np.isnan(y), lambda z: z.nonzero()[0]
def gauss2D(shape=(3,3),sigma=0.5):
"""
2D gaussian mask - should give the same result as MATLAB's
fspecial('gaussian',[shape],[sigma])
"""
m,n = [(ss-1.)/2. for ss in shape]
y,x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )
h[ h < np.finfo(h.dtype).eps*h.max() ] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
return h
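# Illustrative use of gauss2D as applied throughout this package: weights over a
# lat/lon box that sum to 1, used for box-averaged reanalysis sampling.
# >>> w = gauss2D(shape=(5, 5), sigma=5)
# >>> np.isclose(w.sum(), 1.0)
# True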
def closest_index(lat_traj, lon_traj, lat, lon):
dist = ((lat - lat_traj)**2 + (lon - lon_traj)**2)**(0.5)
return np.unravel_index(np.nanargmin(dist), dist.shape)
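# closest_index finds the (row, col) of the grid point nearest to
# (lat_traj, lon_traj) in simple Euclidean degree space (no great-circle
# correction), for 2D lat/lon coordinate arrays.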
def get_GOES_files_for_dates(date_array):
# if True:
all_GOES_files = sorted(glob.glob(r'/home/disk/eos4/jkcm/Data/CSET/GOES/VISST_pixel/G15V03.0.NH.*.NC'))
all_GOES_date_strings = [i[-22:-10] for i in all_GOES_files]
relevant_dates = [dt.datetime.strftime(i, '%Y%j.%H%M') for i in sorted(as_datetime(date_array))]
relevant_files = sorted([all_GOES_files[all_GOES_date_strings.index(d)] for d in relevant_dates])
return relevant_files
def get_ERA_data(var_list, lats, lons, times, pressures, box_degrees=2):
"""Retrieve ERA5 data in a box around a trajectory
Assumes ERA5 data is 0.3x0.3 degrees
Returns an xarray Dataset
"""
space_index = int(np.round(box_degrees/0.3/2)) # go up/down/left/right this many pixels
assert len(lats) == len(lons) == len(times) == len(pressures)
unique_days = set([as_datetime(i).date() for i in times])
files = [os.path.join(ERA_source, "ERA5.pres.NEP.{:%Y-%m-%d}.nc".format(i))
for i in unique_days]
return_ds = xr.Dataset(coords={'time': times})
with xr.open_mfdataset(sorted(files)) as data:
for var in var_list:
vals = []
for (lat, lon, time, pres) in zip(lats, lons%360, times, pressures):
x = data[var].sel(longitude=slice(lon - box_degrees/2, lon + box_degrees/2),
latitude=slice(lat + box_degrees/2, lat - box_degrees/2))
y = x.sel(method='nearest', tolerance=np.timedelta64(59, 'm'), time=time)
z = y.sel(method='nearest', tolerance=50, level=pres)
#this applies a 2D gaussian the width of z, i.e. sigma=box_degrees
gauss = gauss2D(shape=z.shape, sigma=z.shape[0])
filtered = z.values * gauss
vals.append(np.sum(filtered))
da = xr.DataArray(data=vals, coords={'time': times}, dims=['time'])
return_ds[var] = da
return return_ds
lev_map = {'1': 0.0100, '2': 0.0200, '3': 0.0327, '4': 0.0476,
'5': 0.0660, '6': 0.0893, '7': 0.1197, '8': 0.1595,
'9': 0.2113, '10': 0.2785, '11': 0.3650, '12': 0.4758,
'13': 0.6168, '14': 0.7951, '15': 1.0194, '16': 1.3005,
'17': 1.6508, '18': 2.0850, '19': 2.6202, '20': 3.2764,
'21': 4.0766, '22': 5.0468, '23': 6.2168, '24': 7.6198,
'25': 9.2929, '26': 11.2769, '27': 13.6434, '28': 16.4571,
'29': 19.7916, '30': 23.7304, '31': 28.3678, '32': 33.8100,
'33': 40.1754, '34': 47.6439, '35': 56.3879, '36': 66.6034,
'37': 78.5123, '38': 92.3657, '39': 108.6630, '40': 127.8370,
'41': 150.3930, '42': 176.9300, '43': 208.1520, '44': 244.8750,
'45': 288.0830, '46': 337.5000, '47': 375.0000, '48': 412.5000,
'49': 450.0000, '50': 487.5000, '51': 525.0000, '52': 562.5000,
'53': 600.0000, '54': 637.5000, '55': 675.0000, '56': 700.0000,
'57': 725.0000, '58': 750.0000, '59': 775.0000, '60': 800.0000,
'61': 820.0000, '62': 835.0000, '63': 850.0000, '64': 865.0000,
'65': 880.0000, '66': 895.0000, '67': 910.0000, '68': 925.0000,
'69': 940.0000, '70': 955.0000, '71': 970.0000, '72': 985.0000}
pres_map = {}
for k, v in lev_map.items():
pres_map[v] = int(k)
def get_MERRA_level(pressure):
a, b = zip(*[(float(k), v) for k, v in lev_map.items()])
levels = sorted(a)
pressures = sorted(b)
return(interp1d(pressures, levels)(pressure))
def MERRA_lev(lev, invert=False, lev_map=lev_map):
if invert:
pres_map = {}
for k, v in lev_map.items():
pres_map[str(v)] = int(k)
lev_map = pres_map
if isinstance(lev, collections.abc.Iterable):
pres = [lev_map[str(int(i))] for i in lev]
else:
pres = lev_map[str(int(lev))]
return pres
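# e.g. (illustrative) get_MERRA_level(850.0) -> 63.0 and
# get_MERRA_level(977.5) -> 71.5 (linear interpolation between model levels);
# MERRA_lev(72) -> 985.0 and MERRA_lev([71, 72]) -> [970.0, 985.0].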
def get_MERRA_data(var_list, lats, lons, times, pressures, box_degrees=2):
"""Retrieve ERA5 data in a box around a trajectory
Assumes ERA5 data is 0.3x0.3 degrees
Returns an xarray Dataset
"""
# Merra lat spacing is 0.5 deg (n-s), lon-spacing is 0.625 (e-w)
#lat_space_index = int(np.round(box_degrees/0.5/2)) # go up/down this many pixels
#lon_space_index = int(np.round(box_degrees / 0.625 / 2)) # go left-right this many pixels
assert len(lats) == len(lons) == len(times) == len(pressures)
unique_days = set([as_datetime(i).date() for i in times])
files = [os.path.join(MERRA_source, "svc_MERRA2_400.inst3_3d_aer_Nv.{:%Y%m%d}.nc4".format(i))
for i in unique_days]
return_ds = xr.Dataset(coords={'time': times})
with xr.open_mfdataset(sorted(files)) as data:
for var in var_list:
vals = []
for (lat, lon, time, pres) in zip(lats, (lons+180)%360-180, times, pressures):
x = data[var].sel(lon=slice(lon - box_degrees/2, lon + box_degrees/2),
lat=slice(lat - box_degrees/2, lat + box_degrees/2))
y = x.sel(method='nearest', tolerance=dt.timedelta(minutes=179), time=time)
z = y.sel(method='nearest', tolerance=1, lev=get_MERRA_level(pres))
#this applies a 2D gaussian the width of z, i.e. sigma=box_degrees
gauss = gauss2D(shape=z.shape, sigma=max(z.shape))
filtered = z.values * gauss
vals.append(np.sum(filtered))
da = xr.DataArray(data=vals, coords={'time': times}, dims=['time'])
return_ds[var] = da
return return_ds
def dep_get_MERRA_var(varname, nc, make_vol=True):
if varname == 'SALT':
salt_names = ['SS{:03}'.format(i) for i in range(1, 3)]
var = np.sum([nc[name][:] for name in salt_names], axis=0)
var_name_str = 'Sea Salt Mixing Ratio (all bins)'
units = nc['SS001'].units
elif varname == 'DUST':
dust_names = ['DU{:03}'.format(i) for i in range(1, 2)]
var = np.sum([nc[name][:] for name in dust_names], axis=0)
var_name_str = 'Dust Mixing Ratio (all bins)'
units = nc['DU001'].units
elif varname == 'BC':
BC_names = ['BCPHILIC', 'BCPHOBIC']
var = np.sum([nc[name][:] for name in BC_names], axis=0)
var_name_str = 'Black Carbon Mixing Ratio (total)'
units = nc['BCPHILIC'].units
elif varname == 'OC':
OC_names = ['OCPHILIC', 'OCPHOBIC']
var = np.sum([nc[name][:] for name in OC_names], axis=0)
var_name_str = 'Organic Carbon Mixing Ratio (total)'
units = nc['OCPHILIC'].units
elif varname == 'SG':
SG_names = ['DMS', 'MSA', 'SO2']
var = np.sum([nc[name][:] for name in SG_names], axis=0)
var_name_str = 'Sulfur Compounds Mixing Ratio (total)'
units = nc['OCPHILIC'].units
elif varname == 'AEROSOL':
aa_names = ['SS001', 'SS002', 'DU001', 'BCPHILIC', 'BCPHOBIC',
'OCPHILIC', 'OCPHOBIC', 'SO4']
var = np.sum([nc[name][:] for name in aa_names], axis=0)
var_name_str = 'Particulate Aerosol Mixing Ratio (total)'
units = nc['BCPHILIC'].units
else:
var = nc[varname][:].squeeze() # (time, pres, lats, lons)
var_name_str = (' ').join(nc[varname].long_name.split('_'))
units = nc[varname].units
# Sorting out units and variable names, converting to volumetric
if units == "kg kg-1" and np.mean(var) < 0.01: # dealing with aerosol
if make_vol:
dens = nc['AIRDENS'][:].squeeze() # (time, pres, lats, lons)
var = var * dens * 10**9
units = r' (${\mu}g m^{-3}$)'
else:
var = var*10**9
units = r' (${\mu}g kg^{-1}$)'
else:
units = (r'\_').join(units.split('_'))
var_str = var_name_str + ' (' + units + ')'
return var, var_str
def CSET_date_from_table(date, time):
"""return datetime object from CSET Lookup Table-formatted date and time
"""
d = as_datetime(dt.datetime.strptime(str(int(date)), '%m%d%y') + dt.timedelta(seconds=time))
return d
def add_leg_sequence_labels(df, start_times, end_times, legs, sequences):
"""add leg labels to insitu data."""
# data = self.flight_data
sequence_array = np.empty(df.time.values.shape, dtype='U1')
leg_array = np.empty(df.time.values.shape, dtype='U1')
df['leg'] = (('time'), leg_array)
df['sequence'] = (('time'), sequence_array)
for s, e, leg, seq in zip(start_times, end_times, legs, sequences):
which_times = np.logical_and(as_datetime(df['time'].values) >= s,
as_datetime(df['time'].values) <= e)
df['leg'][which_times] = leg
df['sequence'][which_times] = seq
df = df.set_coords(['leg', 'sequence'])#, inplace=True)
return df, sequences
# self.sequences = sorted(list(set(sequences)))
def flightpair_from_flight(flight):
if isinstance(flight, str):
if len(flight) == 4:
flight = int(flight[2:])
else:
flight = int(flight)
if not flight in range(2, 16):
raise ValueError('invalid flight number')
if flight % 2 == 0:
return ('rf{:02d}_rf{:02d}'.format(flight, flight + 1))
elif flight % 2 == 1:
return ('rf{:02d}_rf{:02d}'.format(flight - 1, flight))
def get_waypoint_data(flight, waypoint_type='a'):
# selecting wp file
flightpair = flightpair_from_flight(flight)
floc = r'/home/disk/eos4/jkcm/Data/CSET/Trajectories/{}_waypoints'.format(waypoint_type)
wpfile = os.path.join(floc, flightpair.upper() + '_{}_waypoints.txt'.format(waypoint_type))
# parsing
def parseFunc(y, m, d, H, M):
return dt.datetime(int(y), int(m), int(d), int(H), int(M))
columns = ['lab', 'outlat', 'outlon', 'out_Y', 'out_M', 'out_D', 'out_HH', 'out_MM',
'retlat', 'retlon', 'ret_Y', 'ret_M', 'ret_D', 'ret_HH', 'ret_MM']
if waypoint_type == 'b':
columns.append('dist')
data = pd.read_table(wpfile, names=columns, skiprows=3, engine='python',
date_parser=parseFunc, index_col='lab',
                         sep=r'\s+', # delim_whitespace=True,
parse_dates={'out_time': ['out_Y', 'out_M', 'out_D', 'out_HH', 'out_MM'],
'ret_time': ['ret_Y', 'ret_M', 'ret_D', 'ret_HH', 'ret_MM']})
return (data)
def qv_from_p_T_RH(p, T, RH):
"""p in Pa, T in K, Rh in pct. return is in kg/kg
"""
es = 611.2*np.exp(17.67*(T-273.15)/(T-29.65))
qvs = 0.622*es/(p-es)
qv = qvs*RH/100
return qv
# qvs = 0.622*es/(p-0.378*es)
# rvs = qvs/(1-qvs)
# rv = RH/100. * rvs
# qv = rv/(1+rv)
# return qv
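# Quick sanity check for qv_from_p_T_RH (defined but never called): at p = 100000 Pa,
# T = 293.15 K and RH = 80%, es is ~2337 Pa and qvs ~0.0149 kg/kg, so qv comes out ~0.0119 kg/kg.
def _example_qv_from_p_T_RH():
    return qv_from_p_T_RH(p=100000.0, T=293.15, RH=80.0)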
def load_flight_trajectory(flight, number, trajectory_type='500m_+72'):
flightpair = flightpair_from_flight(flight)
wp_data = get_waypoint_data(flight=flight, waypoint_type='a')
out_date = wp_data.loc[number, 'out_time']
trajectory_loc = r'/home/disk/eos4/jkcm/Data/CSET/Trajectories/{}'.format(flightpair)
trajectory_name = r'analysis.UW_HYSPLIT_GFS.{:%Y%m%d%H%M}.airmass_trajectories_{}.txt'.format(out_date,
trajectory_type)
t_data = read_tdump(os.path.join(trajectory_loc, trajectory_name))
return (t_data.sort_values('dtime'))
def load_flightplan(infile):
with open(infile, 'rb') as readfile:
flightplan = pickle.load(readfile)
return flightplan
def load_flight_file(infile):
"""
loads a flight file from disk.
Opposite of make_flight_file
"""
with open(infile, 'rb') as readfile:
flightplan = pickle.load(readfile)
return flightplan
def read_CSET_Lookup_Table(path=None, rf_num=None, sequences=None, legs=None, variables=None):
"""Read in data from the CSET Lookup Table.
Arguments
----------
path : str
string representing the location of the lookup table
rf_num : str or list, int
list of integers of research flights, or 'all' for all flights
legs : str or list, str
list of strings representing the LEG NAMES for which variables should
be retrieved, or 'all' for all variables
b: below cloud
c: in cloud
a: above cloud
p: porpoise
m: Mather sounding
k: Kona sounding
f: ferry
u: up sounding
d: down sounding
sequences : str or list, str
list of strings representing the SEQUENCE NAMES for which variables should
be retrieved, or 'all' for all defined sequences. first sequence of each flight
is 'A', last is C-E depending on how many sequences were performed.
NOTE: 'all' is NOT the same as leaving this as None (default). 'all' will
explicitly look at all the sequences, so any upper-level data would be excluded.
None will look only at rf_num and specified legs, ignoring sequences entirely.
variables: list, str
list of strings representing variables you want as list. leave blank to
get error message with all options. Useful ones are 'Date', 'ST', 'ET'
for date, start time, and end time
Returns
----------
ret_dict : dict
dictionary with m+2 entries, where m is the number of requested vars:
'rf': an array of length n the research flights
'sequence': an array of length n of the sequences
for each variable, a dictionary with units and a length n array
of variable values
"""
# warnings.warn("NOTE: usage change Feb 2018: sequences now refers to flight sequence (A,B,...) "
# "and legs refers to portion of flight ('b', 'p'), etc. see docstring")
if path is None:
path = r'/home/disk/eos4/jkcm/Data/CSET/LookupTable_all_flights.xls'
sheet = xlrd.open_workbook(path).sheet_by_index(0)
leg_colnum = np.argwhere(np.array(sheet.row_values(11)) == 'Leg Name').flatten()[0]
all_legs = [str(i) for i in sheet.col_values(leg_colnum)[18:]]
flight_colnum = np.argwhere(np.array(sheet.row_values(11)) == 'Research Flight Number').flatten()[0]
all_flights = [int(i) for i in sheet.col_values(flight_colnum)[18:]]
seq_colnum = np.argwhere(np.array(sheet.row_values(11)) == 'Sequence Name').flatten()[0]
all_sequences = [str(i) for i in sheet.col_values(seq_colnum)[18:]]
abb_cell = [str(i.value) for i in sheet.col_slice(0, 0, 10)]
val_cell = [str(i.value) for i in sheet.col_slice(1, 0, 10)]
varab = [str(i.value) for i in sheet.row_slice(12, 3, 39)]
vname = [str(i.value).ljust(28) for i in sheet.row_slice(11, 3, 39)]
vunit = [str(i.value).ljust(6) for i in sheet.row_slice(16, 3, 39)]
if legs == 'all':
legs = [str(i) for i in set(all_legs)]
elif isinstance(legs, str):
legs = [legs]
if rf_num == 'all':
rf_num = [i for i in set(all_flights)]
elif isinstance(rf_num, int):
rf_num = [rf_num]
if sequences == 'all':
sequences = [str(i) for i in set(all_sequences)]
elif isinstance(sequences, str):
sequences = [sequences]
# if there is missing input, print some helpful information
mess = "Missing or incorrect input, printing help"
if rf_num is None or not set(rf_num) <= set(all_flights):
mess += ("\n\nspecify the RESEARCH FLIGHTS (rf_num) you want as list."
"\noptions are {}".format(str([i for i in set(all_flights)])))
mess += "\nor select 'all'"
if legs is None or not set(legs) <= set(all_legs):
abbs = ['{}: {}'.format(a, b) for (a, b) in zip(abb_cell, val_cell)]
mess += ("\n\nspecify the LEG NAMES (legs) you want as list.\n"
"options are: \n{}".format('\n'.join(abbs)))
mess += "\nor select 'all'"
if sequences is not None and not set(sequences) <= set(all_sequences):
mess += ("\n\neither leave SEQUENCE NAMES (seqs) blank to \n"
"ignore sequences, or else specify as list, or select 'all'")
if variables is None or not set(variables) <= set(varab):
var = ['{}{}{}'.format(a.ljust(14), b, c) for (a, b, c) in
zip(varab, vname, vunit)]
mess += ("\n\nspecify the VARIABLES (variables) you want as list.\n"
"options are: \n{}".format('\n'.join(var)))
if len(mess) > 41:
raise ValueError(mess)
# otherwise return the requested values as a dict of dicts\
rows, = np.where(
[False]*18 + [True if (l in legs and f in rf_num) else False
for l, f in zip(all_legs, all_flights)])
if sequences is not None:
seqrows, = np.where(
[False]*18 + [True if s in sequences else False
for s in all_sequences])
rows = np.intersect1d(rows, seqrows)
cols, = np.where(
[False]*3 + [True if v in variables else False for v in varab])
rf = np.array([int(sheet.cell(r, flight_colnum).value) for r in rows])
leg = np.array([str(sheet.cell(r, leg_colnum).value) for r in rows])
seq = np.array([str(sheet.cell(r, seq_colnum).value) for r in rows])
ret_dict = {'rf': rf, 'leg': leg, 'seq': seq}
for c in cols:
varname = str(sheet.cell(12, c).value)
units = str(sheet.cell(16, c).value)
values = np.array([sheet.cell(r, c).value for r in rows])
ret_dict[varname] = {'units': units, 'values': values}
return ret_dict
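# Usage sketch for read_CSET_Lookup_Table (defined but never called): pull the date,
# start and end times of all below-cloud legs on RF06, following the docstring above.
def _example_read_lookup_table():
    return read_CSET_Lookup_Table(rf_num=[6], legs=['b'],
                                  variables=['Date', 'ST', 'ET'])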
def get_leg_times_by_sequence(flightnum, sequence, leg):
path = r'/home/disk/eos4/jkcm/Data/CSET/LookupTable_all_flights.xls'
flight = read_CSET_Lookup_Table(path, rf_num=flightnum, sequences=[sequence],
legs=[leg], variables=['Date', 'ST', 'ET'])
start_times = as_datetime([CSET_date_from_table(d, t) for d, t in
zip(flight['Date']['values'], flight['ST']['values'])])
end_times = as_datetime([CSET_date_from_table(d, t) for d, t in
zip(flight['Date']['values'], flight['ET']['values'])])
sounding_times = list(zip(flight['rf'], start_times, end_times))
return(sounding_times[0][1], sounding_times[0][2])
def read_CSET_data(fname, var_list=None,
start_date=None, end_date=None):
"""read in CSET UHSAS .nc file and returns requested variables
"""
with nc4.Dataset(fname, 'r') as nc:
timevar = nc.variables['Time']
date = nc4.num2date(timevar[:], units=timevar.units)
if start_date is None:
start_date = date[0]
if end_date is None:
end_date = date[-1]
indx = np.logical_and(date >= start_date, date <= end_date)
date = date[indx]
ret_dict = {'Date': date}
for var_name in var_list:
ret_dict[var_name] = nc.variables[var_name][:].squeeze()[indx]
return ret_dict
def get_GOES_data(variable_list, lat, lon, time, degree, dlat=12, dlon=21):
def GOES_file_from_date(time, location, max_distance=3):
"""Return the goes filename corresponding to the time in the location folder
max_distance is the max number of hours away we are allowed to validly look
dlat is number of lat indexes per degree, same for dlon
"""
# offs = 0 if time.minute < 30 else 1
f, b = np.arange(max_distance) + 1, -np.arange(max_distance)
        offs = np.hstack(list(zip(f, b))) if time.minute > 30 else np.hstack(list(zip(b, f)))
for off in offs:
file = "G15V03.0.NH.{:%Y%j.%H}00.PX.08K.NC".format(time + dt.timedelta(hours=int(off)))
if os.path.exists(os.path.join(location, file)):
return os.path.join(location, file)
raise IOError("no GOES file found!")
file_name = GOES_file_from_date(time, GOES_source)
with xr.open_dataset(file_name) as data:
# if True:
# data = xr.open_dataset(file_name)
[k for (k,v) in data.coords.items()]
ret_dict = {}
lats = data.coords['latitude'].values
lons = data.coords['longitude'].values
ilat, ilon = closest_index(lat, lon, lats, lons)
# dlat = lats[ilat-1,ilon] - lats[ilat,ilon]
# dlon = lons[ilat,ilon+1] - lons[ilat,ilon]
# delta_lat = degree/2/dlat
# delta_lon = degree/2/dlon
# lat_mask = np.logical_and(lats > lat - degree/2., lats < lat + degree/2.)
# lon_mask = np.logical_and(lats > lat - degree/2., lats < lat + degree/2.)
# crd_mask = np.logical_and(lat_mask, lon_mask)
delta_lat = int((degree/2)*dlat)
delta_lon = int((degree/2)*dlon)
# print(delta_lat)
# print(delta_lon)
latslice = slice(ilat-delta_lat,ilat+delta_lat)
lonslice = slice(ilon-delta_lon,ilon+delta_lon)
ret_dict['lat'] = lats[latslice,lonslice]
ret_dict['lon'] = lons[latslice,lonslice]
for variable in variable_list:
# variable = 'visible_count'
if variable not in data.data_vars.keys():
raise ValueError("{} variable not in dataset!")
vardata = data.data_vars[variable].values[latslice,lonslice]
ret_dict[variable] = vardata
# ret_dict[variable] = data.data_vars[variable].loc[dict(image_y=latslice,
# image_x=lonslice)]
return ret_dict
def get_flight_start_end_times(rf_num, lookup_table_path):
if rf_num == 16:
start_time = dt.datetime(2015, 8, 12, 15, 25)
end_time = dt.datetime(2015, 8, 12, 22, 5)
return (start_time, end_time)
x = read_CSET_Lookup_Table(lookup_table_path, rf_num=[rf_num],
legs=['m', 'k'], variables=['Date', 'ST', 'ET'])
if rf_num % 2 == 0: # westward leg, m is start, k is end
start_time = CSET_date_from_table(x['Date']['values'][0], x['ST']['values'][0])
end_time = CSET_date_from_table(x['Date']['values'][1], x['ET']['values'][1])
else: # eastward leg
start_time = CSET_date_from_table(x['Date']['values'][0], x['ST']['values'][0])
end_time = CSET_date_from_table(x['Date']['values'][1], x['ET']['values'][1])
return (start_time, end_time)
def make_landmask_dep(lats, lons):
def points_in_polys(points, polys):
result = []
# mask = np.empty_like(points)*False
for poly in polys:
# mask = path.contains_points(points, poly)
polypath = path.Path(poly)
mask = polypath.contains_points(points)
# result.extend(points[mask])
points = points[~mask]
return np.array(result)
m = Basemap(projection='moll',lon_0=0,resolution='c')
m.drawcoastlines()
m.fillcontinents(color='coral',lake_color='aqua')
x, y = m(lons, lats)
# loc = np.c_[x, y]
# loc = np.array([(a, b) for a, b in zip(x, y)])
loc = np.array([(a, b) for a, b in zip(x.ravel(), y.ravel())])
polys = [p.boundary for p in m.landpolygons]
# path = path.Path
land_loc = points_in_polys(loc, polys)
mask = np.array([True if a in land_loc else False for a in loc]).reshape(x.shape)
return mask
def make_landmask(lats, lons):
m = Basemap(projection='cyl', resolution='c')
x, y = m(lons.ravel(), lats.ravel())
locations = np.c_[x, y]
polygons = [path.Path(p.boundary) for p in m.landpolygons]
result = np.zeros(len(locations), dtype=bool)
for polygon in polygons:
result += np.array(polygon.contains_points(locations))
return result.reshape(lats.shape)
def varcheck(fname, attr):
with nc4.Dataset(fname) as dataset:
if attr in list(dataset.variables.keys()):
# print 'okay'
return True
else:
print(fname)
return False
def get_hysplit_files(run_date, run_hours):
"""Get HYSPLIT files required to run trajectories, return as list of files
run_date: date of trajectory initialization
run_hours: hours of trajectory. negative number means backward trajectory
"""
today = dt.datetime.today()
start_date = min(run_date, run_date + dt.timedelta(hours=run_hours))
end_date = max(run_date, run_date + dt.timedelta(hours=run_hours))
days_since_start = (today.date() - start_date.date()).days
days_since_end = (today.date() - end_date.date()).days
file_list = []
while days_since_start > 0: # add all analysis files from previous days
date_to_add = today - dt.timedelta(days=days_since_start)
if date_to_add > end_date:
break
try:
f, d = get_hysplit_analysis(date_to_add)
file_list.append(f)
except ValueError:
print(('could not find analysis for {}'.format(date_to_add)))
days_since_start -= 1
if days_since_end < 1: # trajectory either ends today or in future
f, d = get_hysplit_appended_files(today)
file_list.append(f)
f, d = get_hysplit_forecast_files(today)
file_list.append(f)
return file_list
def get_hysplit_analysis(date):
"""
gets hysplit analysis file for day in date.
if the file is already acquired, will not download it again.
if the file does not exist yet raises error.
"""
ftp = FTP('arlftp.arlhq.noaa.gov')
ftp.login()
ftp.cwd('/archives/gdas0p5')
    rx = re.compile(r'{:%Y%m%d}_gdas0p5\Z'.format(date))
files = sorted(filter(rx.match, ftp.nlst()))
if len(files) == 0:
raise ValueError("ARL: No analysis available for {:%Y%m%d} yet...".format(date))
newest = files[-1]
savedir = os.path.join(HYSPLIT_source, 'analysis')
if not os.path.exists(savedir):
os.makedirs(savedir)
print(("ARL: Attempting to find analysis file {} locally...".format(newest)))
if os.path.isfile(os.path.join(savedir, newest)):
print("ARL: File already acquired, not downloading it again.")
else:
print("ARL: File not found, will grab it from archives.")
try:
ftp.retrbinary("RETR " + newest,
open(os.path.join(savedir, newest), 'wb').write)
except:
print("ARL: Error in ftp transfer.")
raise
print('ARL: Analysis file successfully downloaded')
savedfile = os.path.join(savedir, newest)
print(('ARL: {}'.format(savedfile)))
return savedfile, date
def get_hysplit_appended_files(date=None):
"""
Gets most recent HYSPLIT appended files on date.
Returns file location and initialization time (in the appended
case that means the end of the file, so gfsa for 18Z on the 12th
is relevant from 18Z on the 10th through the 12th, for instance)
"""
f, d = get_hysplit_forecast_files(date, model='gfsa')
return f, d
def get_hysplit_forecast_files(date=None, model='gfsf'):
"""
Gets most recent HYSPLIT forecast files on date.
Finds most recent file on ARL server. If it already exists on disk,
does nothing and returns location on disk and initialization date.
If it does not exist on disk, downloads and then returns the same.
"""
def try_FTP_connect(ftpname):
counter = 0
while True:
try:
ftp = FTP(ftpname)
return ftp
except Exception as e:
counter += 1
sleep(1)
if counter > 20:
raise e
if date is None:
date = dt.datetime.utcnow()
ftp = try_FTP_connect('arlftp.arlhq.noaa.gov')
ftp.login()
ftp.cwd('/forecast/{:%Y%m%d/}'.format(date))
    rx = re.compile(r'hysplit.*.{}\Z'.format(model))
files = list(filter(rx.match, ftp.nlst()))
if len(files) == 0: # too early in the day
print(('ARL: no recent {} matches, looking at yesterday instead'.format(model)))
date = date - dt.timedelta(days=1)
ftp.cwd('/forecast/{:%Y%m%d/}'.format(date))
files = list(filter(rx.match, ftp.nlst()))
newest = files[-1]
savedir = os.path.join(HYSPLIT_source, 'forecast',
'{:%Y%m%d}'.format(date))
if not os.path.exists(savedir):
os.makedirs(savedir)
print(("ARL: Attempting to find {} for {:%Y-%m-%d}...".format(newest, date)))
if os.path.isfile(os.path.join(savedir, newest)):
print("ARL: File already acquired, not downloading it again.")
else:
print("ARL: File not found, will grab it from server.")
try:
ftp.retrbinary("RETR " + newest,
open(os.path.join(savedir, newest), 'wb').write)
except:
print("AR:L Error in ftp transfer.")
raise
print('ARL: File successfully downloaded')
inittime = int(newest.split('.')[-2][1:3])
initdate = date.replace(hour=inittime, minute=0, second=0,
microsecond=0)
savedfile = os.path.join(savedir, newest)
print(("ARL: file saves as {}".format(savedfile)))
return(savedfile, initdate)
def write_control_file(start_time, coords, hyfile_list, hours, vertical_type, init_height,
tdumpdir):
"""
This file generates the CONTROL files used for running the trajectories.
start_time - the datetime object of when the trajectory should start
coords - list of decimal [lat, lon] pairs. N and E are positive.
hyfile_list - list of HYSPLIT source files on which to run model
hours- negative hours means backwards run
vertical_type:
0 'data' ie vertical velocity fields
1 isobaric
2 isentropic
3 constant density
4 constant internal sigma coord
5 from velocity divergence
6 something wacky to convert from msl to HYSPLIT's above ground level
7 spatially averaged vertical velocity
"""
fl = os.path.join(HYSPLIT_workdir, 'CONTROL')
f = open(fl, 'w')
f.write(start_time.strftime('%y %m %d %H\n'))
f.writelines([str(len(coords)), '\n'])
for j in coords:
f.write('{} {} {}\n'.format(str(j[0]), str(j[1]), init_height))
f.writelines([str(hours), '\n'])
f.writelines([str(vertical_type), '\n', '10000.0\n'])
f.write('{}\n'.format(len(hyfile_list)))
for hyfile in hyfile_list:
f.writelines([
os.path.dirname(hyfile), os.sep, '\n',
os.path.basename(hyfile), '\n'])
f.writelines([tdumpdir, os.sep, '\n', 'tdump',
start_time.strftime('%Y%m%dH%H%M'), '\n'])
f.close()
return os.path.join(tdumpdir, 'tdump'+start_time.strftime('%Y%m%dH%H%M'))
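# Usage sketch for write_control_file (defined but never called): a 72-hour forward
# isobaric run from two hypothetical release points at 500 m, writing the tdump into
# trajectory_dir; the meteorology files come from get_hysplit_files above.
def _example_write_control_file():
    start = dt.datetime(2015, 7, 17, 12)
    coords = [(35.0, -140.0), (34.0, -142.0)]
    hyfiles = get_hysplit_files(run_date=start, run_hours=72)
    return write_control_file(start, coords, hyfiles, hours=72, vertical_type=1,
                              init_height=500, tdumpdir=trajectory_dir)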
def read_tdump(tdump):
"""
Read a tdump file as output by the HYSPLIT Trajectory Model
Returns a pandas DataFrame object.
"""
def parseFunc(y, m, d, H, M):
return dt.datetime(int('20'+y), int(m), int(d), int(H), int(M))
columns = ['tnum', 'gnum', 'y', 'm', 'd', 'H', 'M', 'fhour', 'age', 'lat',
'lon', 'height', 'pres']
tmp = pd.read_table(tdump, nrows=100, header=None)
l = [len(i[0]) for i in tmp.values]
skiprows = l.index(max(l))
D = pd.read_table(tdump, names=columns,
skiprows=skiprows,
engine='python',
                      sep=r'\s+', # delim_whitespace=True,
parse_dates={'dtime': ['y', 'm', 'd', 'H', 'M']},
date_parser=parseFunc,
index_col='dtime')
return D
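# Usage sketch for read_tdump (defined but never called): group a tdump by trajectory
# number and keep just the position columns, mirroring add_tdump_to_plot further below.
def _example_read_tdump(tdump_path):
    T = read_tdump(tdump_path)
    return {k: g[['lat', 'lon', 'height']] for k, g in T.groupby('tnum')}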
def bmap(ax=None, drawlines=True, llr=None, par_labs=[1, 1, 0, 0], mer_labs=[0, 0, 1, 1],
merspace=15, parspace=15, **kwargs):
if ax is None:
fig, ax = plt.subplots()
if llr is None:
lat_range = latlon_range['lat']
lon_range = latlon_range['lon']
else:
lat_range = llr['lat']
lon_range = llr['lon']
if 'projection' not in kwargs.keys():
kwargs['projection'] = 'cyl'
kwargs['rsphere'] =(6378137.00, 6356752.3142)
m = Basemap(llcrnrlon=lon_range[0], llcrnrlat=lat_range[0],
urcrnrlon=lon_range[1], urcrnrlat=lat_range[1],
ax=ax, resolution='l', **kwargs)
if drawlines:
m.drawparallels(np.arange(-90., 90., parspace), labels=par_labs, fontsize=14)
m.drawmeridians(np.arange(-180., 180., merspace), labels=mer_labs, fontsize=14)
m.drawcoastlines()
m.fillcontinents(color="white", lake_color="white")
return m
def read_flightpath(flightfile):
"""read in flight file netcdf and return as dict.
"""
with nc4.Dataset(flightfile, 'r') as flt_nc:
lats = flt_nc.variables['LATC'][:].copy()
lons = flt_nc.variables['LONC'][:].copy()
alt = flt_nc.variables['ALT'][:].copy()
timevar = flt_nc.variables['Time']
date = nc4.num2date(timevar[:], units=timevar.units)
if isinstance(lats, np.ma.core.MaskedArray):
m = np.logical_or(lats.mask, lons.mask)
lats = lats.data[~m]
lons = lons.data[~m]
alt = alt.data[~m]
date = date[~m]
fp = {'lats': lats, 'lons': lons, 'date': date,
'alt': alt}
return fp
def gridder(SW, NW, NE, SE, numlats=6, numlons=6):
"""each point is a [lat lon] corner of the desired area"""
lat_starts = np.linspace(SW[0], NW[0], numlats)
lon_starts = np.linspace(SW[1], SE[1], numlons)
lat_ends = np.linspace(SE[0], NE[0], numlats)
lon_ends = np.linspace(NW[1], NE[1], numlons)
lat_weight = np.linspace(0., 1., numlats)
lon_weight = np.linspace(0., 1., numlons)
lat = (1. - lon_weight[:, None])*lat_starts[None, :] +\
lon_weight[:, None]*lat_ends[None, :]
lon = (1. - lat_weight[:, None])*lon_starts[None, :] +\
lat_weight[:, None]*lon_ends[None, :]
l = []
for i in range(numlats):
for j in range(numlons):
l.append((lat[j, i], lon[i, j]))
return(l)
def plot_gridpoints(coords, outfile=None):
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
    m = bmap(ax=ax, projection='cyl', drawlines=True)
m.drawgreatcircle(-121.3, 38.6, -156, 19.8, linestyle='--', c='black')
colors = cm.rainbow(np.linspace(0, 1, len(coords)))
for i, crd in enumerate(coords):
m.plot(crd[1], crd[0], '*', c=colors[i], latlon=True, ms=12, label=i)
x, y = m(crd[1]+.5, crd[0]+.5)
ax.annotate(str(i), xy=(x, y), xytext=(x, y), xycoords='data',
textcoords='data', fontsize=6)
if outfile is not None:
ax.patch.set_visible(False)
fig.savefig(outfile, dpi=300, transparent=True, bbox_inches='tight',
pad_inches=0)
def plot_trajectory(date=None, filename=None):
if date is None and filename is None:
print('give me a date (YYYY-MM-DD) or a file, dummy')
return
elif date:
datet = dt.datetime.strptime(date, '%Y-%m-%d')
filename = os.path.join(trajectory_dir, 'tdump'+datet.strftime('%Y%m%dH%H%M'))
fig, ax, m_ax = make_map_plot()
add_tdump_to_plot(m_ax, filename)
return
def make_map_plot(ax=None, llr=None, **kwargs):
if ax is None:
fig, ax = plt.subplots(figsize=(7, 8))
else:
fig = ax.get_figure()
m_ax = bmap(ax=ax, llr=llr, **kwargs)
# m_ax.drawgreatcircle(-121.3, 38.6, -156, 19.8, linestyle='--', c='black')
m_ax.plot(-121.3, 38.6, 's', ms=8, c='black', latlon=True)
m_ax.plot(-156, 19.8, '*', ms=12, c='black', latlon=True)
# m_ax.plot(-118.2, 33.77, 's', ms=8, c='red', latlon=True)
return fig, ax, m_ax
def nan_correlate(x, y):
x, y = np.array(x), np.array(y)
index = np.logical_and(~np.isnan(x), ~np.isnan(y))
return np.corrcoef(x[index], y[index])[0][1]
def plot_single(t, m=None, c=None, i=None):
m.plot(t.lon.values, t.lat.values, c=c, latlon=True, label=t.tnum[0])
m.plot(t.lon.values[::6], t.lat.values[::6], '.', c=c, latlon=True)
m.plot(t.lon.values[0], t.lat.values[0], '*', c=c, latlon=True, ms=12)
m.plot(t.lon.values[-1], t.lat.values[-1], 's', c=c, latlon=True, ms=8)
if i is not None:
plt.annotate(str(i), xy=(t.lon.values[0]+.5, t.lat.values[0]+.5))
return m
def add_tdump_to_plot(m_ax, tdump):
T = read_tdump(tdump)
t = T.groupby('tnum')
colors = cm.rainbow(np.linspace(0, 1, len(list(t.groups.keys()))))
for i, k in enumerate(t.groups.keys()):
m_ax = plot_single(t.get_group(k), m=m_ax, c=colors[i], i=i)
return
def get_pesky_GOES_files():
badfiles = []
with open(r'/home/disk/p/jkcm/Code/Lagrangian_CSET/GOES_Extractor.log', 'r') as f:
for line in f:
if r'/home/disk/eos4/mcgibbon/nobackup/GOES' in line:
if line not in badfiles:
badfiles.append(line)
with open(r'/home/disk/p/jkcm/Code/Lagrangian_CSET/flawed_GOES.log', 'w') as g:
for line in sorted(badfiles):
if os.path.exists(line[:-1]):
size = '{:3.0f}'.format(os.path.getsize(line[:-1])/1024)
# print size
else:
size = 'NA '
replace_GOES_file(line[:-1])
g.writelines(size + ' ' + line)
def replace_GOES_file(filename, savedir=None):
oldfilename = os.path.basename(filename)
year = int(oldfilename[12:16])
date = dt.datetime(year, 1, 1) + dt.timedelta(days=int(oldfilename[16:19]) - 1)
newfilename = 'prod.goes-west.visst-pixel-netcdf.{:%Y%m%d}.{}'.format(
date, oldfilename)
floc = 'prod/goes-west/visst-pixel-netcdf/{:%Y/%m/%d}/'.format(date)
server = r'http://cloudsgate2.larc.nasa.gov/'
url = server + floc + newfilename
try:
response = urlopen(url)
except HTTPError:
print('could not find file!')
return
print('file found, downloading')
if savedir is None:
savedir = GOES_source
print(('old size is {}KB'.format(os.path.getsize(filename)/1024.)))
if os.path.dirname(filename) == savedir:
print('moving old file')
if not os.path.exists(os.path.join(savedir, 'old')):
os.makedirs(os.path.join(savedir, 'old'))
os.rename(filename, os.path.join(savedir, 'old', oldfilename))
save_file = os.path.join(savedir, oldfilename)
with open(save_file, 'wb') as fp:
while True:
chunk = response.read(16384)
if not chunk:
break
fp.write(chunk)
print(('new size = {}KB'.format(os.path.getsize(save_file)/1024.)))
def as_datetime(date, timezone=pytz.UTC):
"Converts all datetimes types to datetime.datetime with TZ = UTC"
def to_dt(d, timezone):
"""does all the heavy lifting
"""
supported_types = (np.datetime64, dt.datetime)
if not isinstance(d, supported_types):
raise TypeError('type not supported: {}'.format(type(d)))
if isinstance(d, np.datetime64):
# TODO: add timezoneawareness here
ts = (d - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
d = dt.datetime.utcfromtimestamp(ts)
if isinstance(d, pd.Timestamp):
            d = d.to_pydatetime()
if isinstance(d, dt.datetime):
if d.tzinfo is None:
return(d.replace(tzinfo=timezone))
else:
return(d.astimezone(timezone))
if isinstance(date, (collections.Sequence, np.ndarray)):
return np.array([to_dt(x, timezone) for x in date])
return to_dt(date, timezone)
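# Small illustration of as_datetime (defined but never called): scalar and sequence
# inputs of mixed datetime types all come back as timezone-aware UTC datetimes.
def _example_as_datetime():
    single = as_datetime(np.datetime64('2015-07-17T12:00:00'))
    several = as_datetime([dt.datetime(2015, 7, 17, 12), np.datetime64('2015-07-18')])
    return single, several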
datemap = {'20150701': 'RF01',
'20150707': 'RF02',
'20150709': 'RF03',
'20150712': 'RF04',
'20150714': 'RF05',
'20150717': 'RF06',
'20150719': 'RF07',
'20150722': 'RF08',
'20150724': 'RF09',
'20150727': 'RF10',
'20150729': 'RF11',
'20150801': 'RF12',
'20150803': 'RF13',
'20150807': 'RF14',
'20150809': 'RF15',
'20150812': 'RF16'}
def get_data_from_dropsonde(file):
# file = os.path.join(dropsonde_dir, 'D20150712_201424_PQC.nc')
data = xr.open_dataset(file)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
index = data.GPSAlt.values < 4000
ret = {}
ret['TIME']=as_datetime(data.time_offset.values[index])
ret['GGLAT']=data.Lat.values[index]
ret['GGLON']=data.Lon.values[index]
ret['GGALT']=data.GPSAlt.values[index]
ret['RHUM']=data.RH.values[index]
ret['ATX']=data.Temp.values[index]+273.15
ret['PSX']=data.Press.values[index]
ret['DPXC']= data.Dewpt.values[index]+273.15
ret['QV'] = mu.qv_from_p_T_RH(ret['PSX']*100, ret['ATX'], ret['RHUM'])*1000
ret['MR'] = ret['QV']/(1-ret['QV']/1000)
ret['TVIR'] = mu.tvir_from_T_w(ret['ATX'], ret['MR']/1000)
ret['DENS'] = mu.density_from_p_Tv(ret['PSX']*100, ret['TVIR'])
ret['THETA']= mu.theta_from_p_T(ret['PSX'], ret['ATX'])
ret['THETAE']= mu.thetae_from_t_tdew_mr_p(ret['ATX'], ret['DPXC'], ret['MR']/1000, ret['PSX']*100) #equiv pot temp, K we can get this if we really want
ret['QL'] = np.full_like(ret['PSX'], fill_value=np.nan)
ret['THETAL'] = np.full_like(ret['PSX'], fill_value=np.nan)
ret['PLWCC']= np.full_like(ret['PSX'], fill_value=np.nan)
return ret
def date_interp(dates_new, dates_old, vals_old, bounds_error=False):
if not isinstance(dates_new, (collections.Sequence, np.ndarray)):
dates_new = np.array([dates_new])
dates_new = as_datetime(dates_new)
dates_old = as_datetime(dates_old)
ref = min(min(dates_old), min(dates_new))
d_new = [(i-ref).total_seconds() for i in dates_new]
d_old = [(i-ref).total_seconds() for i in dates_old]
vals_new = interp1d(d_old, vals_old, bounds_error=bounds_error)(d_new).squeeze()
if vals_new.shape == ():
return vals_new.item()
return vals_new
def get_cloud_only_vals(dataset, flip_cloud_mask=False):
# cloud if ql_cdp > 0.01 g/kg and RH > 95%
lwc_cdp = dataset['PLWCD_LWOI']
rhodt = dataset['RHODT']
mr = dataset['MR']
cheat_airdens = rhodt/mr
lwmr_cdp = lwc_cdp/cheat_airdens
lw_index = lwmr_cdp > 0.01
RH_index = dataset['RHUM'] > 95
cloud_index = np.logical_and(RH_index, lw_index)
if flip_cloud_mask:
cloud_index = np.logical_not(cloud_index)
return dataset.isel(time=cloud_index)
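# Usage sketch for get_cloud_only_vals (defined but never called): keep only in-cloud
# samples of a flight dataset, or the clear-sky complement by flipping the mask.
def _example_cloud_only(dataset):
    in_cloud = get_cloud_only_vals(dataset)
    clear_sky = get_cloud_only_vals(dataset, flip_cloud_mask=True)
    return in_cloud, clear_sky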
``` |
{
"source": "jk/cocoapod-badges",
"score": 2
} |
#### File: podbadge/utils/helpers.py
```python
__author__ = 'Flavio'
from django.conf import settings
from django.utils import simplejson
import urllib
import urllib2
import mimetypes
def prepare_shield(vendor, status):
url = shield_url(vendor, clean_info(status))
return fetch_shield(url)
def shield_url(vendor, status):
return 'http://%(service)s/%(vendor)s-%(status)s-%(color)s.png' % {
'service': settings.SHIELD_SERVICE,
'color': settings.SHIELD_COLOR,
'vendor': vendor,
'status': status,
}
def fetch_shield(url):
contents = urllib2.urlopen(url).read()
mimetype = mimetypes.guess_type(url)
return contents, mimetype
def clean_info(info):
clean = info.replace('-', '--').replace(' ', '_')
return urllib.quote(clean)
def get_pod_info(podname):
url = 'http://cocoapods.org/api/v1/pod/%s.json' % (podname, )
response = urllib2.urlopen(url)
return simplejson.loads(response.read())
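# Usage sketch (defined but never called): build a version badge for a pod. Note that
# clean_info escapes dashes and spaces first, e.g. '1.2-beta 3' -> '1.2--beta_3'.
# Assumes the CocoaPods API response carries a 'version' field.
def _example_version_shield(podname):
    info = get_pod_info(podname)
    return prepare_shield('pod', info['version'])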
``` |
{
"source": "jkcso/oss2021",
"score": 2
} |
#### File: api/oss_hugo/Ruaml_YAML_Formatter.py
```python
import re
import yaml
from yaml import SafeLoader, SafeDumper
# BaseHandler and the text helper u are assumed to come from the python-frontmatter
# package, which this handler extends.
from frontmatter.default_handlers import BaseHandler
from frontmatter.util import u
from ruamel.yaml import YAML
class RuamlYAMLHandler(BaseHandler):
"""
Load and export YAML metadata. By default, this handler uses YAML's
"safe" mode, though it's possible to override that.
"""
FM_BOUNDARY = re.compile(r'^-{3,}$', re.MULTILINE)
START_DELIMITER = END_DELIMITER = "---"
def load(self, fm, **kwargs):
"""
Parse YAML front matter. This uses yaml.SafeLoader by default.
"""
kwargs.setdefault('Loader', SafeLoader)
return yaml.load(fm, **kwargs)
def export(self, metadata, **kwargs):
"""
Export metadata as YAML. This uses yaml.SafeDumper by default.
"""
kwargs.setdefault('Dumper', SafeDumper)
kwargs.setdefault('default_flow_style', False)
kwargs.setdefault('allow_unicode', True)
metadata = yaml.dump(metadata, **kwargs).strip()
return u(metadata) # ensure unicode
```
#### File: api/tests/test_API_Hugo_OSS.py
```python
from unittest import TestCase
import frontmatter
from pbx_gs_python_utils.utils.Dev import Dev
from pbx_gs_python_utils.utils.Files import Files
from oss_hugo.API_Hugo_OSS import API_Hugo_OSS
from oss_hugo.OSS_Participant import OSS_Participant
from oss_hugo.OSS_Session import OSS_Session
class test_API_Hugo_OSS(TestCase):
def setUp(self):
self.api = API_Hugo_OSS()
self.result = None
def tearDown(self):
if self.result is not None:
Dev.pprint(self.result)
def test_df_field(self):
df = self.api.df_field('chapter_leader').set_index('title')
data = df.to_dict()['chapter_leader']
assert data['<NAME>'] == 'Belgium'
def test_md_files_in_folder(self):
assert len(self.api.md_files_in_folder('content/participant')) > 50
def test_md_files_participants(self):
assert len(self.api.md_files_participants()) >50
def test_md_files_sessions(self):
assert len(self.api.md_files_sessions()) >50
def test_participants(self):
participants = self.api.participants()
assert len(participants) > 50
assert set(participants.values().__iter__().__next__()) == {'content', 'path', 'metadata'}
def test_participants__return_oss_participants(self):
participants = self.api.participants(return_oss_participants=True)
assert len(participants) > 50
assert type(list(participants.values())[0]) == OSS_Participant
assert '<NAME>' in set(participants)
def test_participants_metadatas(self):
assert len(self.api.participants_metadatas()) > 50
def test_sessions(self):
sessions = self.api.sessions()
assert len(sessions) > 50
assert set(sessions.values().__iter__().__next__()) == {'content', 'path', 'metadata'}
def test_sessions__return_oss_sessions(self):
sessions = self.api.sessions(return_oss_sessions=True)
assert len(sessions) > 50
assert type(list(sessions.values())[0]) is OSS_Session
assert 'Threat Model' in set(sessions)
def test_sessions_metadatas(self):
assert len(self.api.sessions_metadatas()) > 50
def test_sessions_oss(self):
assert len(self.api.sessions_oss()) > 50
```
#### File: api/tests/test_OSS_Schedule.py
```python
from unittest import TestCase
from pbx_gs_python_utils.utils.Dev import Dev
from oss_hugo.OSS_Schedule import OSS_Schedule
from oss_hugo.OSS_Session import OSS_Session
class test_OSS_Schedule(TestCase):
def setUp(self):
self.schedule = OSS_Schedule()
self.result = None
def tearDown(self):
if self.result is not None:
Dev.pprint(self.result)
def test_sessions_mapped_by_size(self):
self.result = self.schedule.sessions_mapped_by_size()
def test_df_sessions_registered_participants(self):
self.result = self.schedule.df_sessions_registered_participants()
``` |
{
"source": "jkcw/vocabquiz-autogen",
"score": 3
} |
#### File: vocabquiz-autogen/vocabquiz-autogen/RequestVocabData.py
```python
import requests
import json
import secretKey
secret_key = secretKey.secret_key
#vocab = "get"
#api_request = requests.get('https://www.dictionaryapi.com/api/v3/references/collegiate/json/' + vocab + '?key=' + secret_key)
#js = api_request.json()
file = open('vocab-cache.txt', "r")
js = json.load(file)
file.close()
#print(js[0]['def'][0]['sseq'])
#for i in js[0]['def'][0]['sseq']:
# print(i)
for i in js[0]['def'][0]['sseq'][0]:
definition = i[1]['dt'][0][1]
# TODO: There might be more than one example
example = i[1]['dt'][1][1][0]['t']
print(definition)
print(example)
class RequestVocabData:
def __init__(self, vocab):
self.vocab = vocab.lower()
self.js_dict = []
self.part_of_speech = []
def http_api_request(self):
api_request = requests.get('https://api.dictionaryapi.dev/api/v2/entries/en/' + self.vocab)
js = api_request.json()
try:
self.js_dict = js[0]
return True
except:
return False
def get_all_part_of_speech(self):
self.part_of_speech = self.js_dict["meanings"]
``` |
{
"source": "jkCXf9X4/hass_script_engine",
"score": 3
} |
#### File: decorator/default/proximity.py
```python
from datetime import datetime, timedelta, timezone
from logging import fatal
from custom_components.script_engine.decorator.abc.valid_decorator import ValidDecorator
from custom_components.script_engine.decorator.abc.decorator import Decorator
from custom_components.script_engine.decorator.decorator_type import DecoratorType
class Proximity(ValidDecorator):
"""
Decorator that ensures that a function can not be called to closely again
"""
def __init__(self, hours=0, minutes=1, *args, **kwargs):
super().__init__(*args, **kwargs)
self.hours = hours
self.minutes = minutes
def time_is_outside_proximity(self):
return (self.state_switch_time + timedelta(hours=self.hours, minutes=self.minutes)) < datetime.now(timezone.utc)
def validate(self) -> bool:
if self.state_switch_time == None or self.time_is_outside_proximity():
return True
return False
def execute(self, *args, **kwargs):
self.update_state()
kwargs["result"][self.handler.get_index(self)] = self.state
return super().execute(*args, **kwargs)
```
#### File: decorator/post/arguments.py
```python
from typing import List
from custom_components.script_engine.decorator.abc.decorator import Decorator
from custom_components.script_engine.decorator.decorator_type import DecoratorType
from custom_components.script_engine.event.event_wrapper import StateChangedEvent
from homeassistant.core import Event, State
class Arguments(Decorator):
"""
Extracts the event states and wrap's the under the return_name kwarg
"""
def __init__(self, ids: List[str], key: str, return_name: str = "attributes", *args, **kwargs):
super().__init__(*args, **kwargs)
self.decorator_type = DecoratorType.POST
self.return_name = return_name
self.ids = ids
self.key = key
def execute(self, *args, **kwargs):
events: List[StateChangedEvent] = kwargs.get("events", [])
events = [i for i in events if i.entity_id in self.ids]
attributes_dict = {}
for i in events:
attributes_dict[i.entity_id] = i.new_state.attributes[self.key]
attributes = kwargs.get(self.return_name, [])
attributes.append(attributes_dict)
kwargs[self.return_name] = attributes
return super().execute(*args, **kwargs)
``` |
{
"source": "jkCXf9X4/script_engine",
"score": 3
} |
#### File: script_engine/decorator/debug.py
```python
from custom_components.script_engine.decorator.base_decorator import BaseDecorator
class Debug(BaseDecorator):
"""
Sets the debug flag to the decorators after in the chain
"""
def __init__(self, *args, **kwargs):
super().__init__() # *args, **kwargs)
self.decorator_type = "Debug"
self.name = type(self).__name__
def get_setup_output(self, *args, **kwargs):
kwargs["debug"] = True
return super().get_setup_output(*args, **kwargs)
```
#### File: script_engine/decorator/if_state.py
```python
from typing import Any, Callable, Optional
from custom_components.script_engine.decorator.state import State
from custom_components.script_engine.hass.extension.state import StateExt
class IfState(State):
"""
Decorator that checks if a state is valid
"""
def __init__(self,
id: str,
state: Optional[Any] = "*",
bigger_than: Optional[Any] = "*",
smaller_than: Optional[Any] = "*",
custom_eval: Optional[Callable[[Any ,Any], bool]] = None,
custom_eval_condition: Optional[Any] = True,
stay_valid: Optional[bool] = False,
*args, **kwargs):
super().__init__(id,
state=state,
previous_state="*",
bigger_than=bigger_than,
smaller_than=smaller_than,
custom_eval=custom_eval,
custom_eval_condition=custom_eval_condition,
stay_valid= stay_valid,
*args, **kwargs)
self.decorator_type = "IfState"
self.name = type(self).__name__
def setup(self, *args, **kwargs):
return super().setup(*args, **kwargs)
def get_default_output(self, *args, **kwargs): # overwrite base method
state = kwargs.get("states", [])
state.append(self.new_state)
kwargs["states"] = state
return args, kwargs
def default(self, *args, **kwargs):
self.new_state = self.hass.states.get(self.id)
return super().default(*args, **kwargs)
```
#### File: script_engine/decorator/proximity.py
```python
from datetime import datetime, timedelta, timezone
from custom_components.script_engine.decorator.base_decorator import BaseDecorator
class Proximity(BaseDecorator):
"""
Decorator that ensures that a function can not be called to closely again
"""
def __init__(self, hours=0, minutes=1, *args, **kwargs):
super().__init__(*args, **kwargs)
self.decorator_type = "Proximity"
self.name = type(self).__name__
self.hours = hours
self.minutes = minutes
self.last_trigger = None
    def time_inside_frame_from_timeframe_to_now(self, dt: datetime, hours=0, minutes=0):
if (datetime.now(timezone.utc) - timedelta(hours=hours, minutes=minutes)) < dt:
return True
else:
return False
def default(self, *args, **kwargs):
        # only fire if there is no previous trigger or the last one is outside the proximity window
        if self.last_trigger is None or not self.time_inside_frame_from_timeframe_to_now(self.last_trigger, self.hours, self.minutes):
            self.last_trigger = datetime.now(timezone.utc)
not self.debug or self.log.debug(f"Proximity decorator is valid, continuing")
return super().default(*args, **kwargs)
not self.debug or self.log.debug(f"Proximity decorator is not valid, aborting")
return False
```
#### File: script_engine/decorator/to_state.py
```python
from typing import Any, Callable, Optional
from custom_components.script_engine.event.event_distributor import EventDistributor
from custom_components.script_engine.event.event_wrapper import StateChangedEvent
from custom_components.script_engine.decorator.state import State
class ToState(State):
"""
Decorator that is used to subscribe to and validate event states
"""
def __init__(self,
id: str,
state: Optional[Any] = "*",
previous_state: Optional[Any] = "*",
bigger_than: Optional[Any] = "*",
smaller_than: Optional[Any] = "*",
custom_eval: Optional[Callable[[Any ,Any], bool]] = None,
custom_eval_condition: Optional[Any] = True,
stay_valid: Optional[bool] = False,
*args, **kwargs):
super().__init__(id, state=state,
previous_state=previous_state,
bigger_than=bigger_than,
smaller_than=smaller_than,
custom_eval=custom_eval,
custom_eval_condition=custom_eval_condition,
stay_valid=stay_valid,
*args, **kwargs)
self.decorator_type = "IfState"
self.name = type(self).__name__
self.event_distributor = EventDistributor(self.hass, debug=self.debug)
def setup(self, *args, **kwargs):
self.event: StateChangedEvent = None
self.previous_valid = None
self.event_distributor.register_callback(self.id, callback=self.new_event)
return super().setup(*args, **kwargs)
def new_event(self, *args, **kwargs):
"""
Extracts the states from the event, then consuming it to prevent any other decorator from using it
Calls the first decorator and starts the walk down the decorator chain
"""
self.event = kwargs.get("event", None)
kwargs.pop("event", None)
# Store which decorator is the event trigger
kwargs["trigger"] = self
self.new_state = self.event.new_state
self.old_state = self.event.old_state
not self.debug or self.log.debug(f"\n----New event----\nFunction: {self.get_wrapped_function_name()}, Decorator: {self.id} \nNew: {self.new_state}, Old: {self.old_state}")
new_valid = self.is_valid(self.new_state, self.old_state)
if new_valid != self.previous_valid:
self.previous_valid = new_valid
not self.debug or self.log.debug(f"A switch in state, proceding, new valid state: {new_valid}")
self.decorators[0].default(*args, **kwargs)
else:
not self.debug or self.log.debug(f"No switch in state, aborting, state: {new_valid}")
def get_default_output(self, *args, **kwargs):
events = kwargs.get("events", [])
events.append(self.event)
kwargs["events"] = events
return args, kwargs
def default(self, *args, **kwargs):
not self.debug or self.log.debug(f"Default to_state: {self}")
return super().default(*args, **kwargs)
def teardown(self, *args, **kwargs):
self.event_distributor.remove_callback(self.id, callback=self.new_event)
return super().teardown(*args, **kwargs)
```
#### File: hass/extension/light.py
```python
import logging
from homeassistant.core import HomeAssistant, State
from .service import ServiceExt
from .state import StateExt
class LightExt:
_logger = logging.getLogger(__name__)
ON_STATE = "on"
OFF_STATE = "off"
UNKNOWN_STATE = ""
@classmethod
def turn_on(cls, hass : HomeAssistant, id, data={}, debug=False):
ServiceExt.call_service(hass,"light" , "turn_on", service_data=data, target= {"entity_id" : id}, debug=debug)
@classmethod
def turn_off(cls, hass: HomeAssistant, id, data={}, debug=False):
ServiceExt.call_service(hass,"light" , "turn_off", service_data=data, target= {"entity_id" : id}, debug=debug)
@classmethod
def get_std_attributes(cls, hass :HomeAssistant, id, debug=False):
state = hass.states.get(id)
if state == None:
raise Exception(f"Exception, {id} state not existing")
else:
on_off = state.state
attributes = ["brightness", "color_temp" ] #, "rgb_color", "rgbw_color", "rgbww_color"]
data = {}
for i in attributes:
if state.attributes.get(i, None) != None:
data[i] = state.attributes[i]
return on_off, data
``` |
{
"source": "jkd2021/YOLACT-with-lane-detection",
"score": 3
} |
#### File: jkd2021/YOLACT-with-lane-detection/LD.py
```python
import cv2
import numpy as np
class LaneDetection:
    # some of this code is shared with another project; if anything looks out of place, just ignore it
def LD(picture = None):
# lines : np.array([])
# lines_Hough_al : np.array([])
left_lines_fitted, right_lines_fitted = np.array([[0, 0], [0, 0]]), np.array([[0, 0], [0, 0]])
# read the orignal picture
if picture is not None:
# gray scale + Canny
frame = LaneDetection.app_canny(picture)
# make mask
mask = LaneDetection.mask(frame)
# put mask on the cannyed grayscale frame
masked_edge_img = cv2.bitwise_and(frame, mask)
# display the cannyed + masked grayscale frame
# LaneDetection.show_masked(canny_remote, masked_edge_img)
# do Hough Transform on the cannyed + masked grayscale frame, and
# seperate the lines into left ones and right ones
lines = cv2.HoughLinesP(masked_edge_img, 1, np.pi / 100, 15, minLineLength=00, maxLineGap=20)
if lines is not None:
left_lines = [line for line in lines if -0.5 > LaneDetection.calculate_slope(line)]
right_lines = [line for line in lines if LaneDetection.calculate_slope(line) > 0.5]
# Remove noisy lines
left_lines = LaneDetection.reject_abnormal_lines(left_lines, 0.1)
right_lines = LaneDetection.reject_abnormal_lines(right_lines, 0.1)
# Fit the left and right lane lines separately
left_lines_fitted = LaneDetection.least_squares_fit(left_lines)
right_lines_fitted = LaneDetection.least_squares_fit(right_lines)
if left_lines_fitted is None:
left_lines_fitted = np.array([[0, 0], [0, 0]])
if right_lines_fitted is None:
right_lines_fitted = np.array([[0, 0], [0, 0]])
return left_lines_fitted, right_lines_fitted
return left_lines_fitted, right_lines_fitted
# slope_calculation
def calculate_slope(line):
x_1, y_1, x_2, y_2 = line[0] # line:[[x1,y1,x2,y2], [], []...]
return (y_2 - y_1) / (x_2 - x_1 + 0.01) # calculate the scope of every single line
# Package of Canny
def app_canny(picture):
img = picture
minThreshold = 60
maxThreshold = 130
edges = cv2.Canny(img, minThreshold, maxThreshold)
return edges
# mask making(trapezoid area in front of the car)
def mask(frame):
mask = np.zeros_like(frame)
height = mask.shape[0]
height_bevel = int(0.75 * height)
length = mask.shape[1]
length_bevel = int(0.6 * length)
length_under = int(0.2 * length)
mask = cv2.fillPoly(mask, np.array([[[length_under, height], [length - length_bevel, height_bevel],
[length_bevel, height_bevel], [length - length_under, height],
[length_under, height]]]),
color=255)
########## the size of mask and the parameters in cv2.fillPoly() needs to be modified
########## for different scenarios (camera pointing angle and orientation), in order
########## to achieve the effect of recognition of the specified area in the image
return mask
# firstly calculate the mean slope in both lists of lines, if there are abnormal lines, which have
# great deviation with others in the same side, they will be rejected as noise. After rejection will
# the lines be fitted into left and right lanes by using least square fitting.
def reject_abnormal_lines(lines, threshold):
slopes = [LaneDetection.calculate_slope(line) for line in lines]
# -----------------------------------------------------------------------------------------------------
# i = 0
# while True:
# if i + 1 > len(slopes): # these codes equals to the parameter
# break # tuning in line 69 / 70
# if 0.5 > abs(slopes[i]):
# slopes.pop(i)
# lines.pop(i)
# i = i
# else:
# i += 1
# -----------------------------------------------------------------------------------------------------
while len(slopes) > 0:
mean = np.mean(slopes)
diff = [abs(slope - mean) for slope in slopes]
max_slope = np.argmax(diff)
if diff[max_slope] > threshold:
slopes.pop(max_slope)
lines.pop(max_slope)
else:
break
return lines
# least square fitting
def least_squares_fit(lines):
"""
:param (line in lines): set of lines, [np.array([x_1, y_1, x_2, y_2]), [np.array([x_1, y_1, x_2, y_2]),...]]
:return: end points of a line, np.array([[xmin, ymin], [xmax, ymax]])
"""
        # 1. collect all coordinate points
        x_coords = np.ravel([[line[0][0], line[0][2]] for line in lines])  # line[0] is [x1, y1, x2, y2]; indices 0 and 2 are the x coordinates
        y_coords = np.ravel([[line[0][1], line[0][3]] for line in lines])
        # 2. fit a straight line to get the polynomial coefficients
        if lines is not None:
if len(x_coords) >= 1:
poly = np.polyfit(x_coords, y_coords, deg=1)
                # 3. use the polynomial coefficients to compute two points on the line, which uniquely determine it
point_min = (np.min(x_coords), np.polyval(poly, np.min(x_coords)))
point_max = (np.max(x_coords), np.polyval(poly, np.max(x_coords)))
                return np.array([point_min, point_max], dtype=int)
else:
pass
else:
pass
## error report
class CustomError(Exception):
def __init__(self,ErrorInfo):
super().__init__(self)
self.errorinfo=ErrorInfo
def __str__(self):
return self.errorinfo
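# Usage sketch (defined but never called): run the detector on a single frame read
# with OpenCV; each returned array holds the fitted end points [[x_min, y_min], [x_max, y_max]].
def _example_detect_lanes(image_path):
    frame = cv2.imread(image_path)
    left_lane, right_lane = LaneDetection.LD(frame)
    return left_lane, right_lane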
``` |
{
"source": "jkdelauney/web-project1",
"score": 3
} |
#### File: jkdelauney/web-project1/import.py
```python
import csv
import os
import sys
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
if not os.getenv("DATABASE_URL"):
raise RuntimeError("DATABASE_URL is not set")
engine = create_engine(os.getenv("DATABASE_URL"))
db = scoped_session(sessionmaker(bind=engine))
def main():
# db.execute("DROP TABLE IF EXISTS books CASCADE")
db.execute(
"CREATE TABLE IF NOT EXISTS books (id SERIAL PRIMARY KEY,isbn varchar(13) NOT NULL, title character varying NOT NULL, author character varying NOT NULL, year character(4) NOT NULL)"
)
r_count = 0
f = open("books.csv")
reader = csv.reader(f)
next(reader) # advance past column headers
for isbn, title, author, year in reader: # isbn, title, author, year
db.execute(
"INSERT INTO books (isbn, title, author, year) VALUES (:isbn, :title, :author, :year)",
{"isbn": isbn, "title": title, "author": author, "year": year},
)
sys.stdout.write(".")
sys.stdout.flush()
r_count += 1
db.commit()
print(f"\nRecords imported: {r_count}")
if __name__ == "__main__":
main()
``` |
{
"source": "jkderrick028/Theoretical_Neuroscience",
"score": 3
} |
#### File: Programming/Python/pop3.py
```python
import pickle
import numpy as np
def load_data(path):
with open(path, 'rb') as f:
data = pickle.load(f)
return data
# question 9
data = load_data("pop_coding_3.4.pickle")
c = [data['c1'], data['c2'], data['c3'], data['c4']]
r = [data['r1'], data['r2'], data['r3'], data['r4']]
data = load_data("tuning_3.4.pickle")
stim = data['stim']
n = [data['neuron1'], data['neuron2'], data['neuron3'], data['neuron4']]
n_max = [np.max(np.mean(ni, axis=0)) for ni in n]
arg = np.divide([rr.mean() for rr in r], n_max)
# population vector: preferred-direction vectors weighted by normalized mean responses
population = np.multiply(c, arg.T)
population = population.sum(axis=1)
angle = np.arctan(-population[0] / population[1])
answer = 180 - angle * 180.0 / np.pi
print(answer)
``` |
{
"source": "jkdf2/markov_bots",
"score": 4
} |
#### File: jkdf2/markov_bots/rw.py
```python
from enum import Enum
import urllib.request
import graph
import pickle
import argparse
import sys
import tempfile
class Tokenization(Enum):
"""
word: Interpret the input as UTF-8 and split the input at any
white-space characters and use the strings between the white-space
as tokens. So "a b" would be ["a", "b"] as would "a\n b".
character: Interpret the input as UTF-8 and use the characters as
tokens.
byte: Read the input as raw bytes and use individual bytes as the
tokens.
none: Do not tokenize. The input must be an iterable.
"""
word = 1
character = 2
byte = 3
none = 4
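# Small illustration of the modes above (defined but never called): the token lists
# each mode would produce for the short string "ab cd".
def _example_tokenize(data="ab cd"):
    return {
        Tokenization.word: data.split(),         # ["ab", "cd"]
        Tokenization.character: list(data),      # ["a", "b", " ", "c", "d"]
        Tokenization.byte: list(data.encode()),  # [97, 98, 32, 99, 100]
    }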
class RandomWriter(object):
"""
A Markov chain based random data generator.
"""
def __init__(self, level, tokenization=Tokenization.none):
"""
Initialize a random writer.
Args:
level: The context length or "level" of model to build.
tokenization: A value from Tokenization. This specifies how
the data should be tokenized.
The value given for tokenization will affect what types of
data are supported.
"""
if level < 0:
raise ValueError("The level of analysis must be >= 0.")
if tokenization not in Tokenization:
raise ValueError("You did not provide a valid tokenization mode.")
self._mode = tokenization
self._level = level
self._graph = None
def generate(self):
"""
Yield random tokens using the model, infinitely.
"""
if self._graph is None:
raise ValueError("The RandomWriter must be trained before it can"
"generate tokens.")
while True:
yield self._graph.get_random_token()
def generate_file(self, filename, amount):
"""
Write a file using the model.
Args:
filename: The name of the file to write output to.
amount: The number of tokens to write.
For character or byte tokens this will just output the
tokens one after another. For any other type of token a space
will be added between tokens.
"""
if self._mode is Tokenization.byte:
if not hasattr(filename, 'write'):
with open(filename, mode="wb") as fi:
self.generate_file(fi, amount)
else:
gen = self.generate()
filename.write(bytes(next(gen) for _ in range(amount)))
else:
if not hasattr(filename, 'write'):
with open(filename, mode="w", encoding="utf-8") as fi:
self.generate_file(fi, amount)
else:
                gen = self.generate()
                for _ in range(amount):
                    content = str(next(gen))
if self._mode is not Tokenization.character:
content += " "
filename.write(content)
def save_pickle(self, filename_or_file_object):
"""
Write this model out as a Python pickle.
Args:
filename_or_file_object: A filename or file object to write to.
File objects assumed to be opened in binary mode.
"""
if hasattr(filename_or_file_object, 'write'):
pickle.dump(self, filename_or_file_object, pickle.HIGHEST_PROTOCOL)
else:
# Better open the file first
with open(filename_or_file_object, "wb") as fi:
self.save_pickle(fi)
@classmethod
def load_pickle(cls, filename_or_file_object):
"""
Loads a Python pickle and make sure it is in fact a model.
Args:
filename_or_file_object: A filename or file object to load
from.
Return:
A new instance of RandomWriter which contains the loaded
data.
File objects assumed to be opened in binary mode.
"""
try:
data = pickle.load(filename_or_file_object)
if isinstance(data, cls):
return data
else:
# Something bad happened
raise ValueError("A RandomWriter could not be loaded from the"
"file.")
except TypeError:
# Better open the file first
with open(filename_or_file_object, "rb") as fi:
data = pickle.load(fi)
return data
def train_url(self, url):
"""
Compute the probabilities based on the data downloaded from url.
Args:
url: The URL to download.
"""
if self._mode is Tokenization.none:
raise ValueError("This method is only supported if the "
" tokenization mode is not none.")
with urllib.request.urlopen(url) as response:
text = response.read()
if self._mode is not Tokenization.byte:
try:
text = str(text, encoding="utf-8")
except UnicodeDecodeError:
# Can't decode as UTF-8, so just try our best
text = str(text)
self.train_iterable(text)
def train_iterable(self, data):
"""
Compute the probabilities based on the data given.
If the tokenization mode is none, data must be an iterable. If
the tokenization mode is character or word, then data must be
a string. Finally, if the tokenization mode is byte, then data
        must be a bytes object. If the type is wrong, a TypeError is raised.
"""
data = self.validate_datatype(data)
if data is None:
raise TypeError("Incorrect data given for tokenization mode.")
self._graph = graph.Graph()
        if self._level == 0:
for i in range(len(data)):
state = tuple(data[i:i+1])
self._graph.add_edge(state)
else:
for i in range(len(data) - self._level + 1):
# get a slice of self._level tokens to store in the graph
state = tuple(data[i:i+self._level])
self._graph.add_edge(state)
def validate_datatype(self, data):
"""
Ensures the validity of the given data type with the Tokenization mode,
returning data in the correct form for future iteration or None if
invalid combination of data and mode.
"""
if self._mode is Tokenization.word and isinstance(data, str):
return data.split()
elif (self._mode is Tokenization.character and isinstance(data, str) or
self._mode is Tokenization.byte and isinstance(data, bytes) or
self._mode is Tokenization.none and hasattr(data, '__iter__')):
return data
else:
return None
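# Illustrative sketch (not part of the original module): how RandomWriter is
# typically driven at word level. The training text, level and sample size are
# arbitrary assumptions; itertools.islice takes a finite sample from the
# infinite generate() stream. This helper is never invoked automatically.
def _example_usage():
    from itertools import islice
    rw = RandomWriter(level=1, tokenization=Tokenization.word)
    rw.train_iterable("the quick brown fox jumps over the lazy dog")
    return list(islice(rw.generate(), 10))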
def train_input(args):
"""
Constructs a RandomWriter using the given level and tokenization.
Then trains on the input file or stdin.
Finally, it pickles itself to the output file or stdout.
"""
if args.character:
tokenization = Tokenization.character
elif args.byte:
tokenization = Tokenization.byte
else:
tokenization = Tokenization.word
rw = RandomWriter(args.level, tokenization)
if args.input is sys.stdin:
data = args.input.read()
rw.train_iterable(data)
else:
rw.train_url(args.input)
rw.save_pickle(args.output)
def generate_output(args):
"""
Constructs a RandomWriter from a pickle and proceeds to output the
given amount of generated tokens.
"""
rw = RandomWriter.load_pickle(args.input)
rw.generate_file(args.output, args.amount)
if __name__ == '__main__':
"""
Handles parsing of command line arguments.
"""
parser = argparse.ArgumentParser(add_help=True)
subparsers = parser.add_subparsers()
# The train argument
parser_train = subparsers.add_parser('train', help="Train a model given "
"input and save to pickle output.")
parser_train.add_argument('--input', default=sys.stdin)
parser_train.add_argument('--output', default=sys.stdout.buffer)
token_group = parser_train.add_mutually_exclusive_group()
token_group.add_argument('--word', action='store_true')
token_group.add_argument('--character', action='store_true')
token_group.add_argument('--byte', action='store_true')
parser_train.add_argument('--level', type=int, default=1)
parser_train.set_defaults(func=train_input)
# The generate argument
parser_generate = subparsers.add_parser('generate', help="Generate an "
"output file.")
parser_generate.add_argument('--input', default=sys.stdin.buffer)
parser_generate.add_argument('--output', default=sys.stdout)
parser_generate.add_argument('--amount', required=True, type=int)
parser_generate.set_defaults(func=generate_output)
# because we are only using subparsers, argparse will not print help
# by default, so do it manually.
if len(sys.argv) == 1:
parser.print_help()
exit(1)
args = parser.parse_args()
args.func(args)
``` |
{
"source": "JKDingwall/socketpool",
"score": 3
} |
#### File: socketpool/examples/test_eventlet.py
```python
import eventlet
from socketpool.pool import ConnectionPool
from socketpool.conn import TcpConnector
# this handler will be run for each incoming connection in a dedicated greenlet
class EchoServer(object):
def __init__(self, host, port):
self.host = host
self.port = port
self.spool = eventlet.GreenPool()
self.running = False
self.server = None
def start(self):
eventlet.spawn(self.run)
def run(self):
self.server = eventlet.listen((self.host, self.port))
self.running = True
while self.running:
try:
sock, address = self.server.accept()
print "accepted", address
self.spool.spawn_n(self.handle, sock, address)
except (SystemExit, KeyboardInterrupt):
break
def handle(self, sock, address):
print ('New connection from %s:%s' % address)
while True:
data = sock.recv(1024)
if not data:
break
sock.send(data)
print ("echoed %r" % data)
def stop(self):
self.running = False
if __name__ == '__main__':
import time
options = {'host': 'localhost', 'port': 6000}
pool = ConnectionPool(factory=TcpConnector, options=options,
backend="eventlet")
server = EchoServer('localhost', 6000)
server.start()
epool = eventlet.GreenPool()
def runpool(data):
        print('ok')
        with pool.connection() as conn:
            print('sending')
            sent = conn.send(data)
            print('sent %d bytes' % sent)
            echo_data = conn.recv(1024)
            print("got %s" % data)
assert data == echo_data
start = time.time()
    _ = [epool.spawn(runpool, b"blahblah") for _ in range(20)]
epool.waitall()
server.stop()
delay = time.time() - start
```
#### File: socketpool/socketpool/conn.py
```python
import socket
import time
import random
from socketpool import util
class Connector(object):
def matches(self, **match_options):
raise NotImplementedError()
def is_connected(self):
raise NotImplementedError()
def handle_exception(self, exception):
raise NotImplementedError()
def get_lifetime(self):
raise NotImplementedError()
def invalidate(self):
raise NotImplementedError()
class UnixConnector(Connector):
def __init__(self, socket_file, backend_mod, pool=None):
self._s = backend_mod.Socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket_file = socket_file
self._s.connect(self.socket_file)
self.backend_mod = backend_mod
self._connected = True
self._life = time.time() - random.randint(0, 10)
self._pool = pool
def __del__(self):
self.release()
def matches(self, **match_options):
target_sock = match_options.get('socket_file')
return target_sock == self.socket_file
def is_connected(self):
if self._connected:
return util.is_connected(self._s)
return False
def handle_exception(self, exception):
print('got an exception')
print(str(exception))
def get_lifetime(self):
return self._life
def invalidate(self):
self._s.close()
self._connected = False
self._life = -1
def release(self):
if self._pool is not None:
if self._connected:
self._pool.release_connection(self)
else:
self._pool = None
def send(self, data):
return self._s.send(data)
def recv(self, size=1024):
return self._s.recv(size)
class TcpConnector(Connector):
def __init__(self, host, port, backend_mod, pool=None, mode='r',
bufsize=-1):
self._s = backend_mod.Socket(socket.AF_INET, socket.SOCK_STREAM)
self._s.connect((host, port))
self._s_file = self._s.makefile(mode, bufsize)
self.host = host
self.port = port
self.backend_mod = backend_mod
self._connected = True
# use a 'jiggle' value to make sure there is some
# randomization to expiry, to avoid many conns expiring very
# closely together.
self._life = time.time() - random.randint(0, 10)
self._pool = pool
def __del__(self):
self.release()
def matches(self, **match_options):
target_host = match_options.get('host')
target_port = match_options.get('port')
return target_host == self.host and target_port == self.port
def is_connected(self):
if self._connected:
return util.is_connected(self._s)
return False
def handle_exception(self, exception):
print('got an exception')
print(str(exception))
def get_lifetime(self):
return self._life
def invalidate(self):
self._s.close()
self._s_file.close()
self._connected = False
self._life = -1
def release(self):
if self._pool is not None:
if self._connected:
self._pool.release_connection(self)
else:
self._pool = None
def read(self, size=-1):
return self._s_file.read(size)
def readline(self, size=-1):
return self._s_file.readline(size)
def readlines(self, sizehint=0):
return self._s_file.readlines(sizehint)
def sendall(self, *args):
return self._s.sendall(*args)
def send(self, data):
return self._s.send(data)
def recv(self, size=1024):
return self._s.recv(size)
``` |
{
"source": "jkdufair/TweetyFeels",
"score": 3
} |
#### File: TweetyFeels/src/bitcoin.py
```python
import json
import ssl
import csv
import time
from collections import deque
import urllib.request
BITCOIN_DATA_QUEUE = deque()
STATS = {'count': 0}
def get_bitcoin_data():
"""Get price and volume, etc. from Cryptonator API"""
url = 'https://api.cryptonator.com/api/ticker/btc-usd'
fix_this_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
request = urllib.request.Request(url)
response = urllib.request.urlopen(request, context=fix_this_context).read()
STATS['count'] = STATS['count'] + 1
return json.loads(response.decode('utf-8'))
def queue_bitcoin_data(bitcoin_data):
"""Queue the bitcoin data for writing later"""
processed_bitcoin_data = {
'price': bitcoin_data['ticker']['price'],
'volume': bitcoin_data['ticker']['volume'],
'timestamp': bitcoin_data['timestamp']
}
BITCOIN_DATA_QUEUE.append(processed_bitcoin_data)
def write_bitcoin_data_from_buffer():
"""Take the status objects from the bitcoin data buffer and append them to the csv"""
print('bitcoin write_bitcoin_data_from_buffer() started')
while True:
time.sleep(60)
with open('data/bitcoin_stream.csv', 'a') as csv_file:
field_names = ['price', 'volume', 'timestamp']
writer = csv.DictWriter(csv_file, fieldnames=field_names)
while BITCOIN_DATA_QUEUE:
bitcoin_data = BITCOIN_DATA_QUEUE.popleft()
writer.writerow(bitcoin_data)
def stream_bitcoin_data():
"""Poll the bitcoin data every 60 seconds and persist"""
print('bitcoin stream_bitcoin_data() started')
while True:
queue_bitcoin_data(get_bitcoin_data())
time.sleep(60)
def get_stats():
"""Return statistics about counts and buffer size"""
return {
'count': STATS['count'],
'buffer_size': len(BITCOIN_DATA_QUEUE)
}
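# Illustrative sketch (not part of the original module): the two loops above
# are designed to run concurrently, one polling the API and one flushing the
# queue to disk. A minimal way to wire them together, assuming plain daemon
# threads are acceptable, is shown below; it is never invoked automatically
# and both loops run forever once started.
def _run_example():
    import threading
    poller = threading.Thread(target=stream_bitcoin_data, daemon=True)
    writer = threading.Thread(target=write_bitcoin_data_from_buffer, daemon=True)
    poller.start()
    writer.start()
    poller.join()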
```
#### File: TweetyFeels/src/twitter_auth.py
```python
import pickle
import webbrowser
import os.path
import tweepy
def deserialize_token():
"""Get token from file if it exists"""
if os.path.exists(".token"):
the_file = open(".token", "rb")
return pickle.load(the_file)
else:
return
def serialize_token(token):
"""Save token to file"""
the_file = open(".token", "wb")
pickle.dump(token, the_file)
def authenticate():
"""Create tweepy auth object and return"""
consumer_key = 'aN6bbamocujBEQkTsT4V5Ok36'
consumer_secret = '<KEY>'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
# "<KEY>"
access_token = deserialize_token()
if access_token is None:
url = auth.get_authorization_url()
webbrowser.open(url, new=1, autoraise=True)
pin = input('Verification pin number from twitter.com: ').strip()
access_token = auth.get_access_token(verifier=pin)
serialize_token(access_token)
auth.set_access_token(access_token[0], access_token[1])
return auth
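# Illustrative sketch (not part of the original module): the OAuth handler
# returned by authenticate() is normally passed to tweepy to build an API
# client. The verify_credentials() call is only an assumed smoke test and
# presumes a tweepy version where tweepy.API accepts an auth handler.
def _example_client():
    auth = authenticate()
    api = tweepy.API(auth)
    return api.verify_credentials()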
``` |
{
"source": "jke94/fbref-stats",
"score": 3
} |
#### File: jke94/fbref-stats/stats.py
```python
__author__ = "<NAME> (@JaviKarra94)"
__copyright__ = "Copyright (c) 2022 <NAME>"
__license__ = "MIT License"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__twiter__ = "@JaviKarra94"
__status__ = "Production"
import pandas as pd
import os
import matplotlib.pyplot as plt
def GenerateClassificationPNGFigures(urls, leagues):
"""Generates a set of images about raking points by each set of leagues.
Parameters
----------
urls : list
List of urls with information about the leagues ranking.
leagues : list
Name of the leagues.
"""
if not os.path.exists('Classification'):
os.makedirs('Classification')
for item in range(0, len(urls)):
dfs = pd.read_html(urls[item])
clasificacion = dfs[0]
plot = clasificacion.plot(x="Squad", y=["Pts"], kind="bar", xlabel ="Teams", ylabel = "Points")
fig = plot.get_figure()
        fig.savefig(os.path.join('Classification', leagues[item].strip()), bbox_inches='tight')
def GenerateFullClassificationPNGFigures(urls, leagues, rows, columns):
"""Generates an image with subplots of images about raking points by each set of leagues.
Parameters
----------
urls : list
List of urls with information about the leagues ranking.
leagues : list
Name of the leagues.
rows : list
Number of rows in the plot.
columns : list
Number of columns in the plot.
"""
if not os.path.exists('Classification'):
os.makedirs('Classification')
figure_full, axs = plt.subplots(nrows = rows, ncols = columns, figsize=(15, 10))
figure_full.suptitle('Classification (Twitter: @JaviKarra94)')
dataframes = []
plots = []
for item in range(0, len(urls)):
dfs = pd.read_html(urls[item])
dataframes.append(dfs[0]) # Get clasificacion table league dataframe
# Create plots
for i in range(0, rows):
for j in range(0, columns):
plots.append(dataframes[j * rows + i].plot(x="Squad", y=["Pts"], kind="bar", ax=axs[i,j], title = leagues[j * rows + i], xlabel = "Teams", ylabel = "Points"))
for item in (range(0, len(plots))):
plots[item].grid(axis='y', linestyle='--')
figure_full.tight_layout()
    figure_full.savefig(os.path.join('Classification', 'AllLeagues'), bbox_inches='tight')
def GenerateFullGoalsForVsGoalsAgainstPNGFigures(urls, leagues, rows, columns):
"""Generates an image with subplots of images about local goals vs against goals set of leagues.
Parameters
----------
urls : list
List of urls with information about the leagues ranking.
leagues : list
Name of the leagues.
rows : list
Number of rows in the plot.
columns : list
Number of columns in the plot.
"""
if not os.path.exists('Goals'):
os.makedirs('Goals')
figure_full, axs = plt.subplots(nrows=rows, ncols=columns, figsize=(15, 10))
figure_full.suptitle('Goals for (GF) Vs Goals against (GA) (Twitter: @JaviKarra94)')
dataframes = []
plots = []
for item in range(0, len(urls)):
dfs = pd.read_html(urls[item])
dataframes.append(dfs[0]) # Get clasificacion table league dataframe
# Create plots
for i in range(0, rows):
for j in range(0, columns):
plots.append(dataframes[j * rows + i].plot(x="Squad", y=["GF","GA"], kind="bar", ax=axs[i,j], title = leagues[j * rows + i], xlabel = "Teams", ylabel = "Goals"))
for item in (range(0, len(plots))):
plots[item].grid(axis='y', linestyle='--')
figure_full.tight_layout()
    figure_full.savefig(os.path.join('Goals', 'AllLeagues'), bbox_inches='tight')
def GenerateGoalsForVsGoalsAgainstPNGFigures(urls, leagues):
"""Generates a set of images about local goals vs against goals set of leagues.
Parameters
----------
urls : list
List of urls with information about the leagues ranking.
leagues : list
Name of the leagues.
"""
if not os.path.exists('Goals'):
os.makedirs('Goals')
for item in range(0, len(urls)):
dfs = pd.read_html(urls[item])
clasificacion = dfs[0]
plot = clasificacion.plot(x="Squad", y=["GF","GA"], kind="bar", xlabel ="Teams", ylabel = "Goals")
fig = plot.get_figure()
        fig.savefig(os.path.join('Goals', leagues[item].strip()), bbox_inches='tight')
if __name__ == "__main__":
urls = [
"https://fbref.com/en/comps/12/La-Liga-Stats", # La Liga (ESP)
"https://fbref.com/en/comps/9/Premier-League-Stats", # Premiere League (ENG)
"https://fbref.com/en/comps/11/Serie-A-Stats", # Serie A (ITA)
"https://fbref.com/en/comps/13/Ligue-1-Stats", # Ligue 1 (FRA)
"https://fbref.com/en/comps/20/Bundesliga-Stats", # Bundesliga (ALE)
"https://fbref.com/en/comps/23/Eredivisie-Stats" # Eredivisie (NED)
]
leagues = [
'La Liga (ESP)',
'Premiere League (ENG)',
'Serie A (ITA)',
'Ligue 1 (FRA)',
'Bundesliga (ALE)',
'Eredivisie (NED)'
]
GenerateFullClassificationPNGFigures(urls, leagues, 2, 3)
GenerateClassificationPNGFigures(urls, leagues)
GenerateFullGoalsForVsGoalsAgainstPNGFigures(urls, leagues, 2, 3)
GenerateGoalsForVsGoalsAgainstPNGFigures(urls, leagues)
``` |
{
"source": "jke94/WilliamHill-WebScraping",
"score": 4
} |
#### File: src/utilsmodule/WilliamHillURLs.py
```python
import requests
from bs4 import BeautifulSoup
from urlvalidator import validate_url, ValidationError
class WilliamHillURLs:
"""Auxiliar class with data about William Hill Web to scraping data.
Returns:
WilliamHillURLs: An object with data and auxiliar functions.
Attributes
----------
BaseURL : str
William Hill URL base web page.
URL_FootballOnDirect : str
Football matches URL in direct on William Hill web page.
URL_TenisOnDirect : str
Tenis matches URL in direct on William Hill web page.
URL_BasketOnDirect : str
Basket matches URL in direct on William Hill web page.
"""
BaseURL = 'https://sports.williamhill.es/'
URL_FootballOnDirect = 'https://sports.williamhill.es/betting/es-es/en-directo/f%C3%BAtbol'
URL_TenisOnDirect = 'https://sports.williamhill.es/betting/es-es/en-directo/tenis'
URL_BasketOnDirect = 'https://sports.williamhill.es/betting/es-es/en-directo/baloncesto'
def GetAllUrlMatches(self, urlSport=URL_FootballOnDirect):
"""Get all url matchs from a sport (by default foorball urls.). Validate each URL if it´s a URL valid.
Args:
urlSport (str, optional): Football mathes on direct URL William Hill . Defaults to "https://sports.williamhill.es/betting/es-es/en-directo/f%C3%BAtbol".
Returns:
list: List with all URL matches.
"""
req = requests.get(urlSport)
soup = BeautifulSoup(req.content.decode('utf-8','ignore'), "html.parser")
aux = soup.findAll("a", {"class": ['btmarket__name btmarket__name--featured']})
auxList = []
for item in aux:
try:
theUrl = (self.BaseURL + item['href']).replace("//","/").replace("https:/","https://")
validate_url(theUrl)
auxList.append(theUrl)
except ValidationError:
raise ValidationError(theUrl)
return auxList
def GetAllMatchsPlayedActually(self, urlSport=URL_FootballOnDirect):
"""Get all sport matches played in the actuall moment.
Args:
urlSport (str, optional): A William Hill URL sport. Defaults to URL_FootballOnDirect.
Returns:
list: List with all matches and its bets.
"""
req = requests.get(urlSport)
soup = BeautifulSoup(req.content.decode('utf-8','ignore'), "lxml")
matches = soup.findAll("div", {"class": "btmarket__link-name btmarket__link-name--ellipsis show-for-desktop-medium"})
listaApuestas = soup.findAll("div", {"class": "btmarket__selection"})
matchList = []
for item in matches:
var = item.text + ': ' + listaApuestas[0].text + ' | ' + listaApuestas[1].text + ' | ' + listaApuestas[2].text
matchList.append(var)
return matchList
def ConvertFractionalBetToDecimalBet(self, theBet):
"""Convert a fraccioanl bet str to
Args:
theBet (str): A fractional bet.
Returns:
[str]: A decimal bet.
"""
bet = 0.0
aux = str(theBet).split('/')
bet = (int(aux[0], 10) / int(aux[1], 10) ) + 1
return str(round(bet, 2))
def GetAllBetsFromURLMatch(self, url):
"""Get all bets actually from a match.
Args:
url (str): Match URL
Returns:
[type]: A list with the diferents bets availables.
"""
allBetsList = []
req = requests.get(url)
soup = BeautifulSoup(req.content.decode('utf-8','ignore'), "html.parser")
aux = soup.findAll("h2", {"class" : ['fl']})
# print('Number of diferent bets: ', len(aux), ', Match URL: ', url)
for item in aux:
allBetsList.append(item.text)
# print(item.text,'|',type(item.text), item['class'])
return allBetsList
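# Illustrative sketch (not part of the original module): typical use of the
# class above. Live network access to williamhill.es is required, so this is
# only a guide and is never invoked automatically; the odds string "5/2" is an
# arbitrary example (5/2 + 1 = 3.5).
def _example_scrape():
    wh = WilliamHillURLs()
    matches = wh.GetAllMatchsPlayedActually()                   # live football matches with their bets
    urls = wh.GetAllUrlMatches()                                # URLs of the live matches
    decimal_odds = wh.ConvertFractionalBetToDecimalBet("5/2")   # -> "3.5"
    return matches, urls, decimal_odds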
``` |
{
"source": "jkeam/habatica-pyside2",
"score": 3
} |
#### File: habatica-pyside2/lib/task.py
```python
from enum import Enum
class Priority:
@staticmethod
def priority_index_to_value(index):
priority_values = ['0.1', '1', '1.5', '2']
return priority_values[index]
@staticmethod
def priority_value_to_index(value):
priority_values = ['0.1', '1', '1.5', '2']
return priority_values.index(value)
class TaskType(Enum):
HABIT = 'habit'
DAILY = 'daily'
TODO = 'todo'
REWARD = 'reward'
def __str__(self):
return self.value
class Task:
def __init__(self):
self.id = None
self.text = ''
self.notes = ''
self.priority = ''
```
#### File: habatica-pyside2/lib/widget_registry.py
```python
from enum import Enum
class WidgetRegistryName(Enum):
ITEM_GROUP_TASKS = 'item_group_tasks'
TASK_INPUT = 'task_input'
TASK_TEXTAREA = 'task_textarea'
TASK_PRIORITY = 'task_priority'
ITEM_GROUP = 'item_group'
ITEM_GROUP_LAYOUT = 'item_group_layout'
SAVE_BUTTON = 'save_button'
class WidgetRegistry:
def __init__(self):
self.registry = {}
def store(self, key, value):
self.registry[key] = value
return self
def retrieve(self, key, default=None):
return self.registry.get(key, default)
``` |
{
"source": "JKearnsl/HentaiChanApi",
"score": 3
} |
#### File: HentaiChanApi/hentai_chan_api/content_parser.py
```python
import json
import re
import requests
from bs4 import BeautifulSoup
class MangaContent:
def __init__(self, session: requests.Session, manga_url: str):
self._session = session
self._url = manga_url
@property
def images(self) -> list[str]:
raw_html = self._session.get(self._url, params={'development_access': 'true'}).content
raw_js = BeautifulSoup(raw_html, 'html.parser').find_all('script')[2].text
var_data = raw_js.replace('createGallery(data)', '').replace(' ', '').replace('\n', '').replace("'", '"')
pattern = re.compile(r"var data = (\{.*?\})$", re.MULTILINE | re.DOTALL)
if var_data:
obj = pattern.search(var_data).group(1)
obj = json.loads(obj)
return obj['fullimg']
else:
return []
``` |
{
"source": "j-keck/nodemcu-uploader",
"score": 2
} |
#### File: nodemcu-uploader/nodemcu_uploader/utils.py
```python
from platform import system
from os import environ
from wrapt import ObjectProxy
from sys import version_info
__all__ = ['default_port', 'system', 'hexify', 'from_file', 'wrap', 'PY2', 'ENCODING']
PY2 = version_info.major == 2
ENCODING = 'latin1'
def default_port(sysname=system()):
"""This returns the default port used for different systems if SERIALPORT env variable is not set"""
system_default = {
'Windows': 'COM1',
'Darwin': '/dev/tty.SLAB_USBtoUART'
}.get(sysname, '/dev/ttyUSB0')
return environ.get('SERIALPORT', system_default)
def to_hex(x):
return hex(ord(x))
def hexify(byte_arr):
return ':'.join((to_hex(x)[2:] for x in byte_arr))
def from_file(path):
with open(path, 'rb') as f:
content = f.read()
return content if PY2 else content.decode(ENCODING)
class DecoderWrapper(ObjectProxy):
def read(self, *args, **kwargs):
res = self.__wrapped__.read(*args, **kwargs)
return res if PY2 else res.decode(ENCODING)
def write(self, data):
data = data if PY2 else data.encode(ENCODING)
return self.__wrapped__.write(data)
def wrap(x):
return DecoderWrapper(x)
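# Illustrative examples (not part of the original module), based only on the
# helpers defined above; this function is never invoked automatically.
def _example_usage():
    # default_port falls back to a per-OS default unless SERIALPORT is set.
    assert default_port('Windows') == environ.get('SERIALPORT', 'COM1')
    # hexify renders each character's byte value as colon-separated hex.
    assert hexify('ab') == '61:62'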
``` |
{
"source": "j-keck/zfs-snap-diff",
"score": 2
} |
#### File: tests/browser_tests/tests.py
```python
import unittest
from typing import Any
from selenium import webdriver # type: ignore
import sys
import fs
from browser_tests.page import Page
from zfs import ZFS
class Tests(unittest.TestCase):
zfs = ZFS()
def setUp(self) -> None:
self.page = Page(headless = True)
self.assertIn("ZFS-Snap-Diff", self.page.title())
def testActualFileContent(self) -> None:
fs.createTestFile(self.zfs.mountpoint() + "/file.txt",
["firstline", "secondline", "thirdline"]
)
self.page.selectView("Browse filesystem")
self.page.selectDataset(self.zfs.dataset)
self.page.findByXPath("//td[contains(.,'file.txt')]").click()
self.assertIn("Current content of file.txt", self.page.findById("file-actions-header").text)
self.assertIn("firstline\nsecondline\nthirdline", self.page.findByCSS("#file-actions-body > pre > code").text)
def testCreateSnapshotInBrowseFilesystem(self) -> None:
self.page.selectView("Browse filesystem")
self.page.selectDataset(self.zfs.dataset)
self.page.createSnapshot("create-snapshot-in-browse-filesystem")
self.assertIn("@create-snapshot-in-browse-filesystem' created", self.page.alertText())
def testCreateSnapshotInBrowseSnapshots(self) -> None:
self.page.selectView("Browse snapshots")
self.page.selectDataset(self.zfs.dataset)
self.page.createSnapshot("create-snapshot-in-browse-snapshots")
self.assertIn("@create-snapshot-in-browse-snapshots' created", self.page.alertText())
def testDestroySnapshot(self) -> None:
self.page.selectView("Browse snapshots")
self.page.selectDataset(self.zfs.dataset)
# create snapshot
self.page.createSnapshot("destroy-snapshot")
self.page.closeAlert()
# destroy snapshot
self.page.destroySnapshot("destroy-snapshot")
self.assertIn("Snapshot 'destroy-snapshot' destroyed", self.page.alertText())
self.page.closeAlert()
def testRenameSnapshot(self) -> None:
self.page.selectView("Browse snapshots")
self.page.selectDataset(self.zfs.dataset)
# create snapshot
self.page.createSnapshot("rename-snapshot")
self.page.closeAlert()
# rename snapshot
self.page.renameSnapshot("rename-snapshot", "snapshot-rename")
self.assertIn("Snapshot 'rename-snapshot' renamed to 'snapshot-rename'", self.page.alertText())
self.page.closeAlert()
def testCloneSnapshot(self) -> None:
self.page.selectView("Browse snapshots")
self.page.selectDataset(self.zfs.dataset)
# create snapshot
self.page.createSnapshot("clone-snapshot")
self.page.closeAlert()
# clone snapshot
self.page.cloneSnapshot("clone-snapshot", "cloned")
self.assertIn("Snapshot 'clone-snapshot' cloned to '"+self.zfs.pool+"/cloned'", self.page.alertText())
self.page.closeAlert()
def testRollbackSnapshot(self) -> None:
self.page.selectView("Browse snapshots")
self.page.selectDataset(self.zfs.dataset)
# create snapshot
self.page.createSnapshot("rollback-snapshot")
self.assertIn("@rollback-snapshot' created", self.page.alertText())
self.page.closeAlert()
# create a file
fs.createTestFile(self.zfs.mountpoint() + "/rollback-test.txt", ["dummy"])
self.assertTrue(fs.exists(self.zfs.mountpoint() + "/rollback-test.txt"))
# rollback
self.page.rollbackSnapshot("rollback-snapshot")
self.assertIn("Snapshot 'rollback-snapshot' rolled back", self.page.alertText())
self.assertFalse(fs.exists(self.zfs.mountpoint() + "/rollback-test.txt"))
def tearDown(self) -> None:
self.page.close()
``` |
{
"source": "jkedmiston/pandas-examples",
"score": 3
} |
#### File: pandas-examples/documentation_generator/process_ipynb.py
```python
import os
def prep_line_for_jupyter(l):
"""
"""
if len(l) > 2:
if l[0] == "#" and l[1] == "%":
l = l[1:]
h = l.replace('"', r'\"').replace("\n", "\\n").replace("'", r'\"')
out = '"%s"' % h
out += ",\n"
return out
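# Illustrative example (not part of the original module) of the escaping done
# above: the line is quoted for a notebook JSON "source" array, its newline is
# escaped, and a trailing comma plus newline is appended. Never run automatically.
def _prep_line_example():
    assert prep_line_for_jupyter("x = 1\n") == '"x = 1\\n",\n'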
def prep_lines_for_jupyter(fname):
g = open(fname, 'r')
lines = g.readlines()
out = ''
for l in lines:
out += prep_line_for_jupyter(l)
pass
return out
def replace_exec_line_if_detected(l):
"""
If find a line with exec(open, open the contents of that file and extract those lines and return them.
"""
if l.count("exec(open") == 0:
retval = l
else:
# make sure the exec file is a python file.
if l.count('.py') == 1:
idx = l.index('.py')
suffix = l[idx:idx+3]
i = l.index('open(') + 6
while 1:
test_file_to_inject = l[i:idx] + suffix
if os.path.isfile(test_file_to_inject):
retval = prep_lines_for_jupyter(test_file_to_inject)
retval0 = '"# cell source file: %s\\n",\n' % test_file_to_inject
if retval[0:2] == '"%':
txt1 = retval[:-2].replace('\\n', 'xx').split('\n')[0]
txt = retval[:-2][len(txt1) + 1:]
retval = txt1 + retval0 + txt + "\n"
retval = retval.replace('xx', '\\n')
else:
retval = retval0 + retval[:-2] + "\n"
break
i += 1
return retval
def reset_execution_count_in_ipynb(fname):
lines = open(fname, 'r').readlines()
all_lines = []
for kk, line in enumerate(lines):
if line.count('"execution_count"'):
line = '"execution_count":null,\n'
all_lines.append(line)
g = open(fname, 'w')
g.write('\n'.join(all_lines))
g.close()
print("%s wrote: %s" % (__file__, g.name))
def replace_exec_cells_in_ipynb(fname, fnameout):
lines = open(fname, 'r').readlines()
all_lines = []
for kk, line in enumerate(lines):
new_line = replace_exec_line_if_detected(line)
all_lines.append(new_line)
pass
g = open(fnameout, "w")
g.write('\n'.join(all_lines))
g.close()
print("%s wrote: %s" % (__file__, g.name))
def process_ipynb(inputfile, output):
print("processing %s -> %s" % (inputfile, output))
replace_exec_cells_in_ipynb(inputfile, output)
reset_execution_count_in_ipynb(output)
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser('')
parser.add_argument('input')
parser.add_argument('--output', dest='output', required=False, default=None)
args = parser.parse_args()
if args.output is None:
output = args.input.replace('.ipynb', '_out.ipynb')
else:
output = args.output
if args.input.count('.ipynb') == 0:
raise Exception("expecting ipynb input, you have %s" % args.input)
process_ipynb(args.input, output)
```
#### File: pandas-examples/tests/test_file_sniffer.py
```python
from documentation_generator.file_sniffer import detect_multi_line_assignment
from documentation_generator.file_sniffer import detect_multi_line_function
from documentation_generator.process_ipynb import replace_exec_line_if_detected
def test_replace_exec_line_if_detected():
"""
Tests the execfile replacement capability in creating jupyter notebook
from primitives.
"""
test_line = "exec(open('tests/test_file_sniffer.py').read())"
l = replace_exec_line_if_detected(test_line)
    # somewhat cannibalistic: this file is used to test itself.
test_lines = open("tests/test_file_sniffer.py").readlines()
exec_lines = l.split("\n")
for kk, line in enumerate(test_lines):
if kk == 0:
continue
exec_line = exec_lines[kk + 1]
# excludes \\n", from the end of lines.
# the replacement puts in commas at the end of new lines, and
# newlines are replaced with \\n. If
# there isn't a comma then just do the \\n exclusion.
# the selection starts at 1 to avoid the " at the start of the line.
if exec_line[-1] == ",":
exec_line = exec_line[1:-4]
else:
exec_line = exec_line[1:-3]
test_line = test_lines[kk][:-1]
if test_line.count('"') or test_line.count("\\") or test_line.count("'"):
# TODO... these cases currently too complicated to test
continue
assert exec_line == test_line
def test_detect_multi_line_assignment():
text = """
%(core)s
%(nextline)s
lines5
"""
core = """a = testfunc([43,
44,
45, 46, 47, 48,
50, 51])"""
nextline = "nextl"
text = text % dict(core=core, nextline=nextline)
lines = text.split('\n')
lines = list(map(lambda x: x+"\n", lines))
out = detect_multi_line_assignment(0, lines)
assert out['index'] == 1
assert out['continue'] == 1
out = detect_multi_line_assignment(1, lines)
assert out["output"] == core + "\n"
assert out["index"] == 1 + 4
assert lines[out["index"]] == nextline + "\n"
def test_detect_multi_line_function():
text = """
%(core)s
%(nextline)s
lines5
"""
core = """def a1(x):
print(x)
return"""
nextline = "nextl"
text = text % dict(core=core, nextline=nextline)
lines = text.split('\n')
lines = list(map(lambda x: x+"\n", lines))
out = detect_multi_line_function(0, lines)
assert out['index'] == 1
assert out['continue'] == 1
out = detect_multi_line_function(1, lines)
assert out["output"] == core + "\n"
assert out["index"] == 1 + 3
assert lines[out["index"]] == nextline + "\n"
if __name__ == "__main__":
test_detect_multi_line_assignment()
test_detect_multi_line_function()
test_replace_exec_line_if_detected()
``` |
{
"source": "jkedra/flask-dance-multi-provider",
"score": 2
} |
#### File: flask-dance-multi-provider/app/__init__.py
```python
from flask import Flask
from .config import Config
from .models import db, bcrypt, login_manager
from .oauth import github_blueprint, google_blueprint
from .views import auth_blueprint, main_blueprint
from .cli import create_db, shell_context_processor
def create_app():
app = Flask(__name__)
app.config.from_object(Config)
app.register_blueprint(github_blueprint, url_prefix="/login")
app.register_blueprint(google_blueprint, url_prefix="/login")
app.register_blueprint(auth_blueprint)
app.register_blueprint(main_blueprint)
app.cli.add_command(create_db)
app.shell_context_processors.append(shell_context_processor)
db.init_app(app)
bcrypt.init_app(app)
login_manager.init_app(app)
return app
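# Illustrative sketch (not part of the original module): how the application
# factory above is typically used for local development. Host, port and the
# debug flag are arbitrary assumptions; this helper is never invoked here.
def _run_dev_server():
    app = create_app()
    app.run(host="127.0.0.1", port=5000, debug=True)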
```
#### File: app/views/main.py
```python
from flask import Blueprint, render_template
blueprint = Blueprint("main", __name__)
@blueprint.route("/")
def index():
return render_template("home.j2")
``` |
{
"source": "jkedra/PodcastMirror",
"score": 3
} |
#### File: jkedra/PodcastMirror/Mirror.py
```python
import os
import sys
#
import argparse
import logging
import configparser
from DAB import DABItem
from Podcast import Podcast
import datetime
#
# import mp3
def initLog(log, args):
"""Initialize logging system
:params log: logger object
:params args: argsparse.parse_args()
"""
log_levels = {None: logging.WARN,
1: logging.INFO,
2: logging.DEBUG}
log.setLevel(log_levels[args.verbose])
if args.logfile:
chnLog = logging.FileHandler(args.logfile, mode='a')
chnLog.setFormatter(
logging.Formatter("%(asctime)s [%(levelname)s]: %(message)s"))
log.addHandler(chnLog)
if not args.silent:
# create console handler and set level to debug
chnStr = logging.StreamHandler()
chnStr.setFormatter(logging.Formatter("%(levelname)s:%(message)s"))
log.addHandler(chnStr)
def initParser():
"""
https://docs.python.org/3/howto/argparse.html
https://docs.python.org/3/library/argparse.html#module-argparse
Returns argparse.Namespace object.
"""
p = argparse.ArgumentParser()
p.add_argument("-v", "--verbose", action="count",
help="increases verbosity")
p.add_argument("-d", "--days", type=int, default=30,
help="how far in the past go")
p.add_argument("-t", "--target", default="DATA",
help="target data directory")
p.add_argument("-l", "--logfile",
help="logging to file, log file path required")
p.add_argument("-s", "--silent", action="store_true",
help="silent mode - suppress terminal output")
return p.parse_args()
# MAIN PROGRAM STARTS HERE
# KeyboardInterrupt
args = initParser()
log = logging.getLogger(__name__)
initLog(log, args)
config = configparser.ConfigParser()
cf = config.read(['mirror.cfg', os.path.expanduser('~/.mirror.cfg')])
if len(cf) == 0:
print("config file not found")
sys.exit(-1)
else:
log.debug("config file %s" % str(cf))
baseurl = config.get("RSS", "baseurl")
target_dir = config.get("Mirror", "data") or args.target
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
log.debug(f"changing dir to {target_dir}")
os.chdir(target_dir)
for pi in Podcast(baseurl, DABItem):
podcast_age = datetime.datetime.now() - pi.date
if podcast_age > datetime.timedelta(days=args.days):
log.debug(f"{pi.name} {pi.date} too old")
continue
# can you ever download the file, reiterate if not
try:
pi.getsize()
except (IOError, TypeError) as e:
print(f"IOError, TypeError {e}")
continue
verbose = args.verbose and args.verbose > 1 and not args.silent
pi.download_description()
pi.download(verbose)
```
#### File: jkedra/PodcastMirror/test_appendThenRemove.py
```python
from unittest import TestCase
class TestAppendThenRemove(TestCase):
def setUp(self):
import tempfile
with tempfile.NamedTemporaryFile(delete=False) as src:
src.write(b"AAA")
self.srcname = src.name
with tempfile.NamedTemporaryFile(delete=False) as dst:
self.dstname = dst.name
@staticmethod
def remove(fname):
import os
try:
os.remove(fname)
except FileNotFoundError:
pass
def test_appendThenRemove_unlink(self):
from Podcast import appendThenRemove
import os
appendThenRemove(self.srcname, self.dstname)
if os.path.isfile(self.srcname):
self.fail('File exists but should be removed.')
def tearDown(self):
self.remove(self.srcname)
self.remove(self.dstname)
``` |
{
"source": "jkeelan/faculty-cli",
"score": 2
} |
#### File: faculty-cli/test/test_projects.py
```python
import pytest
from click.testing import CliRunner
from faculty_cli.cli import cli
from faculty.clients.project import ProjectClient
import faculty.clients.base
from test.fixtures import PROJECT, USER_ID
@pytest.fixture
def mock_update_check(mocker):
mocker.patch("faculty_cli.update.check_for_new_release")
@pytest.fixture
def mock_check_credentials(mocker):
mocker.patch("faculty_cli.cli._check_credentials")
@pytest.fixture
def mock_user_id(mocker):
mocker.patch("faculty_cli.auth.user_id", return_value=USER_ID)
def test_list_projects(
mocker,
mock_update_check,
mock_check_credentials,
mock_profile,
mock_user_id,
):
runner = CliRunner()
schema_mock = mocker.patch("faculty.clients.project.ProjectSchema")
mocker.patch.object(ProjectClient, "_get", return_value=[PROJECT])
result = runner.invoke(cli, ["project", "list"])
assert result.exit_code == 0
assert result.output == PROJECT.name + "\n"
ProjectClient._get.assert_called_once_with(
"/user/{}".format(USER_ID), schema_mock.return_value
)
def test_list_projects_verbose(
mocker,
mock_update_check,
mock_check_credentials,
mock_profile,
mock_user_id,
):
runner = CliRunner()
schema_mock = mocker.patch("faculty.clients.project.ProjectSchema")
mocker.patch.object(ProjectClient, "_get", return_value=[PROJECT])
result = runner.invoke(cli, ["project", "list", "-v"])
tpl = "Project Name ID\n{} {}\n"
assert result.exit_code == 0
assert result.output == tpl.format(PROJECT.name, PROJECT.id)
ProjectClient._get.assert_called_once_with(
"/user/{}".format(USER_ID), schema_mock.return_value
)
def test_create_project(
mocker,
mock_update_check,
mock_check_credentials,
mock_profile,
mock_user_id,
):
runner = CliRunner()
mocker.patch.object(ProjectClient, "create", return_value=PROJECT)
result = runner.invoke(cli, ["project", "new", "test-project"])
assert result.exit_code == 0
assert result.output == "Created project {} with ID {}\n".format(
PROJECT.name, PROJECT.id
)
ProjectClient.create.assert_called_once_with(USER_ID, "test-project")
def test_create_project_bad_request(
mocker,
mock_update_check,
mock_check_credentials,
mock_profile,
mock_user_id,
):
runner = CliRunner()
response = faculty.clients.base.BadRequest("response", error="some error")
mocker.patch.object(ProjectClient, "create", side_effect=response)
result = runner.invoke(cli, ["project", "new", "test-project"])
assert result.exit_code == 64
assert result.output == "some error\n"
``` |
{
"source": "jkeelan/faculty",
"score": 3
} |
#### File: faculty/clients/auth.py
```python
from requests.auth import AuthBase
class FacultyAuth(AuthBase):
"""Requests auth implementation for accessing Faculty services.
Parameters
----------
session : faculty.session.Session
The Faculty session to authenticate with
To perform an authenticated request against a Faculty service, first
construct an instance of this class with a session:
>>> import faculty.session
>>> session = faculty.session.get_session()
>>> auth = FacultyAuth(session)
then pass it as the ``auth`` argument when making a request with
``requests``:
>>> import requests
>>> requests.get(
'https://servicename.services.example.my.faculty.ai',
auth=auth
)
You can also set it as the ``auth`` attribute on a
:class:`requests.Session`, so that subsequent requests will be
authenticated automatically:
>>> import requests
>>> session = requests.Session()
>>> session.auth = auth
"""
def __init__(self, session):
self.session = session
def __call__(self, request):
access_token = self.session.access_token()
header_content = "Bearer {}".format(access_token.token)
request.headers["Authorization"] = header_content
return request
```
#### File: faculty/clients/environment.py
```python
import re
from collections import namedtuple
from enum import Enum
from marshmallow import (
ValidationError,
fields,
post_load,
validates,
post_dump,
)
from marshmallow_enum import EnumField
from faculty.clients.base import BaseClient, BaseSchema
class Constraint(Enum):
AT_LEAST = ">="
EQUAL = "=="
Version = namedtuple("Version", ["constraint", "identifier"])
PythonPackage = namedtuple("PythonPackage", ["name", "version"])
Pip = namedtuple("Pip", ["extra_index_urls", "packages"])
Conda = namedtuple("Conda", ["channels", "packages"])
PythonEnvironment = namedtuple("PythonEnvironment", ["pip", "conda"])
Apt = namedtuple("Apt", ["packages"])
AptPackage = namedtuple("AptPackage", ["name", "version"])
Script = namedtuple("Script", ["script"])
PythonSpecification = namedtuple("PythonSpecification", ["python2", "python3"])
Specification = namedtuple("Specification", ["apt", "bash", "python"])
Environment = namedtuple(
"Environment",
[
"id",
"project_id",
"name",
"description",
"author_id",
"created_at",
"updated_at",
"specification",
],
)
EnvironmentCreationResponse = namedtuple("EnvironmentCreationResponse", ["id"])
EnvironmentCreateUpdate = namedtuple(
"EnvironmentCreateUpdate", ["name", "description", "specification"]
)
PYTHON_VERSION_REGEX = re.compile(
r"^(?:\d+\!)?\d+(?:\.\d+)*(?:(?:a|b|rc)\d+)?(?:\.post\d+)?(?:\.dev\d+)?$"
)
APT_VERSION_REGEX = re.compile(r"^[a-zA-Z0-9\\.\\+-:~]+$")
class PythonVersionSchema(BaseSchema):
constraint = EnumField(Constraint, by_value=True, required=True)
identifier = fields.String(required=True)
@validates("identifier")
def validate_version_format(self, data):
if not PYTHON_VERSION_REGEX.match(data):
raise ValidationError("Invalid version format")
@post_load
def make_version(self, data):
return Version(**data)
@post_dump
def dump_version(self, data):
self.validate_version_format(data["identifier"])
return data
class AptVersionSchema(BaseSchema):
constraint = EnumField(Constraint, by_value=True, required=True)
identifier = fields.String(required=True)
@validates("identifier")
def validate_version_format(self, data):
if not APT_VERSION_REGEX.match(data):
raise ValidationError("Invalid version format")
@post_load
def make_version(self, data):
return Version(**data)
@post_dump
def dump_version(self, data):
self.validate_version_format(data["identifier"])
return data
class PythonVersionField(fields.Field):
"""Field that serialises/deserialises a Python package version."""
def _deserialize(self, value, attr, obj, **kwargs):
if value == "latest":
return "latest"
else:
return PythonVersionSchema().load(value)
def _serialize(self, value, attr, obj, **kwargs):
if value == "latest":
return "latest"
else:
return PythonVersionSchema().dump(value)
class AptVersionField(fields.Field):
"""Field that serialises/deserialises an apt package version."""
def _deserialize(self, value, attr, obj, **kwargs):
if value == "latest":
return "latest"
else:
return AptVersionSchema().load(value)
def _serialize(self, value, attr, obj, **kwargs):
if value == "latest":
return "latest"
else:
return AptVersionSchema().dump(value)
class PythonPackageSchema(BaseSchema):
name = fields.String(required=True)
version = PythonVersionField(required=True)
@post_load
def make_python_package(self, data):
return PythonPackage(**data)
class PipSchema(BaseSchema):
extra_index_urls = fields.List(
fields.String(), data_key="extraIndexUrls", required=True
)
packages = fields.List(fields.Nested(PythonPackageSchema()), required=True)
@post_load
def make_pip(self, data):
return Pip(**data)
class CondaSchema(BaseSchema):
channels = fields.List(fields.String(), required=True)
packages = fields.List(fields.Nested(PythonPackageSchema()), required=True)
@post_load
def make_conda(self, data):
return Conda(**data)
class PythonEnvironmentSchema(BaseSchema):
conda = fields.Nested(CondaSchema(), required=True)
pip = fields.Nested(PipSchema(), required=True)
@post_load
def make_python_specification(self, data):
return PythonEnvironment(**data)
class PythonSpecificationSchema(BaseSchema):
python2 = fields.Nested(
PythonEnvironmentSchema(), data_key="Python2", missing=None
)
python3 = fields.Nested(
PythonEnvironmentSchema(), data_key="Python3", missing=None
)
@post_load
def make_python(self, data):
return PythonSpecification(**data)
class AptPackageSchema(BaseSchema):
name = fields.String(required=True)
version = AptVersionField(required=True)
@post_load
def make_apt_package(self, data):
return AptPackage(**data)
class AptSchema(BaseSchema):
packages = fields.List(fields.Nested(AptPackageSchema()), required=True)
@post_load
def make_apt(self, data):
return Apt(**data)
class ScriptSchema(BaseSchema):
script = fields.String(required=True)
@post_load
def make_script(self, data):
return Script(**data)
class SpecificationSchema(BaseSchema):
apt = fields.Nested(AptSchema(), required=True)
bash = fields.List(fields.Nested(ScriptSchema()), required=True)
python = fields.Nested(PythonSpecificationSchema(), required=True)
@post_load
def make_specification(self, data):
return Specification(**data)
class EnvironmentSchema(BaseSchema):
id = fields.UUID(data_key="environmentId", required=True)
project_id = fields.UUID(data_key="projectId", required=True)
name = fields.String(required=True)
description = fields.String(missing=None)
author_id = fields.UUID(data_key="authorId", required=True)
created_at = fields.DateTime(data_key="createdAt", required=True)
updated_at = fields.DateTime(data_key="updatedAt", required=True)
specification = fields.Nested(SpecificationSchema(), required=True)
@post_load
def make_environment(self, data):
return Environment(**data)
class EnvironmentCreateUpdateSchema(BaseSchema):
name = fields.String(required=True)
description = fields.String(missing=None)
specification = fields.Nested(SpecificationSchema(), required=True)
@post_load
def make_environment_update(self, data):
return EnvironmentCreateUpdate(**data)
class EnvironmentCreationResponseSchema(BaseSchema):
id = fields.UUID(data_key="environmentId", required=True)
@post_load
def make_environment(self, data):
return EnvironmentCreationResponse(**data)
class EnvironmentClient(BaseClient):
SERVICE_NAME = "baskerville"
def list(self, project_id):
endpoint = "/project/{}/environment".format(project_id)
return self._get(endpoint, EnvironmentSchema(many=True))
def get(self, project_id, environment_id):
endpoint = "/project/{}/environment/{}".format(
project_id, environment_id
)
return self._get(endpoint, EnvironmentSchema())
def update(
self, project_id, environment_id, name, specification, description=None
):
content = EnvironmentCreateUpdate(
name=name, specification=specification, description=description
)
endpoint = "/project/{}/environment/{}".format(
project_id, environment_id
)
self._put_raw(
endpoint, json=EnvironmentCreateUpdateSchema().dump(content)
)
def create(self, project_id, name, specification, description=None):
endpoint = "/project/{}/environment".format(project_id)
content = EnvironmentCreateUpdate(
name=name, specification=specification, description=description
)
response = self._post(
endpoint,
EnvironmentCreationResponseSchema(),
json=EnvironmentCreateUpdateSchema().dump(content),
)
return response.id
def delete(self, project_id, environment_id):
endpoint = "/project/{}/environment/{}".format(
project_id, environment_id
)
self._delete_raw(endpoint)
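# Illustrative sketch (not part of the original module): building a minimal
# Specification from the namedtuples above and passing it to
# EnvironmentClient.create(). The session object, project id, environment name
# and package are placeholders/assumptions; never invoked automatically.
def _example_create_environment(session, project_id):
    python3 = PythonEnvironment(
        pip=Pip(
            extra_index_urls=[],
            packages=[PythonPackage(name="numpy", version="latest")],
        ),
        conda=Conda(channels=[], packages=[]),
    )
    spec = Specification(
        apt=Apt(packages=[]),
        bash=[],
        python=PythonSpecification(python2=None, python3=python3),
    )
    client = EnvironmentClient(session)
    return client.create(project_id, "example-env", spec)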
```
#### File: faculty/clients/user.py
```python
from collections import namedtuple
from enum import Enum
from marshmallow import fields, post_load
from marshmallow_enum import EnumField
from faculty.clients.base import BaseSchema, BaseClient
class GlobalRole(Enum):
BASIC_USER = "global-basic-user"
FULL_USER = "global-full-user"
ADMIN = "global-admin"
User = namedtuple(
"User",
[
"id",
"username",
"full_name",
"email",
"created_at",
"enabled",
"global_roles",
"is_system",
],
)
class UserSchema(BaseSchema):
id = fields.UUID(data_key="userId", required=True)
username = fields.Str(required=True)
full_name = fields.Str(data_key="fullName", missing=None)
email = fields.Str(required=True)
created_at = fields.DateTime(data_key="createdAt", required=True)
enabled = fields.Boolean(required=True)
global_roles = fields.List(
EnumField(GlobalRole, by_value=True),
data_key="globalRoles",
missing=None,
)
is_system = fields.Boolean(data_key="isSystem", required=True)
@post_load
def make_user(self, data):
return User(**data)
class UserClient(BaseClient):
SERVICE_NAME = "flock"
def get_user(self, user_id):
endpoint = "/user/{}".format(user_id)
response = self._get(endpoint, UserSchema())
return response
def get_all_users(self, is_system=None, enabled=None):
params = {}
if is_system is not None:
params["isSystem"] = "true" if is_system else "false"
if enabled is not None:
params["isDisabled"] = "false" if enabled else "true"
endpoint = "/users"
response = self._get(endpoint, UserSchema(many=True), params=params)
return response
def set_global_roles(self, user_id, global_roles):
endpoint = "/user/{}/roles".format(user_id)
response = self._put(
endpoint, UserSchema(), json={"roles": global_roles}
)
return response
```
#### File: faculty/datasets/util.py
```python
import posixpath
class DatasetsError(Exception):
pass
def rationalise_path(path):
# All paths should be relative to root
path = posixpath.join("/", path)
normed = posixpath.normpath(path)
if path.endswith("/") and not normed.endswith("/"):
normed += "/"
return normed
def get_relative_path(parent_directory, directory):
parent_directory = rationalise_path(parent_directory)
directory = rationalise_path(directory)
if not directory.startswith(parent_directory):
tpl = "{} is not a sub path of {}"
raise ValueError(tpl.format(directory, parent_directory))
# Remove the root
relative_path = posixpath.relpath(directory, parent_directory)
return relative_path
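# Illustrative examples (not part of the original module) of the helpers
# above; this function is never invoked automatically.
def _example_paths():
    assert rationalise_path("foo/bar/../baz") == "/foo/baz"
    assert rationalise_path("foo/bar/") == "/foo/bar/"
    assert get_relative_path("/input", "/input/a/b") == "a/b"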
```
#### File: tests/clients/test_account.py
```python
import uuid
import pytest
from marshmallow import ValidationError
from faculty.clients.account import (
AccountClient,
Account,
AccountSchema,
AuthenticationResponse,
AuthenticationResponseSchema,
)
USER_ID = uuid.uuid4()
USERNAME = "joe_bloggs"
ACCOUNT = Account(user_id=USER_ID, username=USERNAME)
ACCOUNT_BODY = {"userId": str(USER_ID), "username": USERNAME}
def test_account_schema():
data = AccountSchema().load(ACCOUNT_BODY)
assert data == ACCOUNT
@pytest.mark.parametrize(
"data", [{}, {"userId": "not-a-uuid", "username": USERNAME}]
)
def test_account_schema_invalid(data):
with pytest.raises(ValidationError):
AccountSchema().load(data)
def test_authentication_response_schema():
data = AuthenticationResponseSchema().load({"account": ACCOUNT_BODY})
assert data == AuthenticationResponse(account=ACCOUNT)
@pytest.mark.parametrize("data", [{}, {"account": "not-an-account"}])
def test_authentication_response_schema_invalid(data):
with pytest.raises(ValidationError):
AuthenticationResponseSchema().load(data)
def test_account_client_authenticated_account(mocker):
mocker.patch.object(
AccountClient,
"_get",
return_value=AuthenticationResponse(account=ACCOUNT),
)
schema_mock = mocker.patch(
"faculty.clients.account.AuthenticationResponseSchema"
)
client = AccountClient(mocker.Mock())
assert client.authenticated_account() == ACCOUNT
AccountClient._get.assert_called_once_with(
"/authenticate", schema_mock.return_value
)
def test_account_client_authenticated_user_id(mocker):
mocker.patch.object(
AccountClient, "authenticated_account", return_value=ACCOUNT
)
client = AccountClient(mocker.Mock())
assert client.authenticated_user_id() == USER_ID
AccountClient.authenticated_account.assert_called_once_with()
``` |
{
"source": "jkeen871/pyvmomi-community-samples",
"score": 3
} |
#### File: pyvmomi-community-samples/samples/add_disk_to_vm.py
```python
from pyVmomi import vim
from pyVmomi import vmodl
from pyVim.connect import SmartConnect, Disconnect
import atexit
import argparse
import getpass
def get_args():
parser = argparse.ArgumentParser(
description='Arguments for talking to vCenter')
parser.add_argument('-s', '--host',
required=True,
action='store',
                        help='vSphere service to connect to')
parser.add_argument('-o', '--port',
type=int,
default=443,
action='store',
help='Port to connect on')
parser.add_argument('-u', '--user',
required=True,
action='store',
help='User name to use')
parser.add_argument('-p', '--password',
required=False,
action='store',
help='Password to use')
parser.add_argument('-v', '--vm-name',
required=False,
action='store',
help='name of the vm')
parser.add_argument('--uuid',
required=False,
action='store',
help='vmuuid of vm')
parser.add_argument('--disk-type',
required=False,
action='store',
default='thin',
help='thick or thin')
parser.add_argument('--disk-size',
required=True,
action='store',
help='disk size, in GB, to add to the VM')
args = parser.parse_args()
if not args.password:
args.password = <PASSWORD>(
prompt='Enter password')
return args
def get_obj(content, vimtype, name):
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
for c in container.view:
if c.name == name:
obj = c
break
return obj
def add_disk(vm, si, disk_size, disk_type):
spec = vim.vm.ConfigSpec()
# get all disks on a VM, set unit_number to the next available
for dev in vm.config.hardware.device:
if hasattr(dev.backing, 'fileName'):
unit_number = int(dev.unitNumber) + 1
# unit_number 7 reserved for scsi controller
if unit_number == 7:
unit_number += 1
if unit_number >= 16:
print "we don't support this many disks"
return
# add disk here
dev_changes = []
new_disk_kb = int(disk_size) * 1024 * 1024
disk_spec = vim.vm.device.VirtualDeviceSpec()
disk_spec.fileOperation = "create"
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
disk_spec.device = vim.vm.device.VirtualDisk()
disk_spec.device.backing = \
vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
if disk_type == 'thin':
disk_spec.device.backing.thinProvisioned = True
disk_spec.device.backing.diskMode = 'persistent'
disk_spec.device.unitNumber = unit_number
disk_spec.device.capacityInKB = new_disk_kb
disk_spec.device.controllerKey = 1000
dev_changes.append(disk_spec)
spec.deviceChange = dev_changes
vm.ReconfigVM_Task(spec=spec)
print "%sGB disk added to %s" % (disk_size, vm.config.name)
def main():
args = get_args()
# connect this thing
si = SmartConnect(
host=args.host,
user=args.user,
pwd=<PASSWORD>,
port=args.port)
# disconnect this thing
atexit.register(Disconnect, si)
vm = None
if args.uuid:
search_index = si.content.searchIndex
vm = search_index.FindByUuid(None, args.uuid, True)
elif args.vm_name:
content = si.RetrieveContent()
vm = get_obj(content, [vim.VirtualMachine], args.vm_name)
if vm:
add_disk(vm, si, args.disk_size, args.disk_type)
else:
print "VM not found"
# start this thing
if __name__ == "__main__":
main()
```
#### File: pyvmomi-community-samples/samples/list_datastore_info.py
```python
import argparse
import atexit
import json
from pyVim import connect
from pyVmomi import vmodl
from pyVmomi import vim
from tools import cli
def get_args():
"""
Supports the command-line arguments listed below.
"""
parser = argparse.ArgumentParser(
description='Process args for retrieving all the Virtual Machines')
parser.add_argument('-s', '--host', required=True, action='store',
help='Remote host to connect to')
parser.add_argument('-o', '--port', type=int, default=443, action='store',
help='Port to connect on')
parser.add_argument('-u', '--user', required=True, action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password', required=True, action='store',
help='Password to use when connecting to host')
parser.add_argument('-j', '--json', default=False, action='store_true',
help='Output to JSON')
args = parser.parse_args()
return args
# http://stackoverflow.com/questions/1094841/
def sizeof_fmt(num):
"""
Returns the human readable version of a file size
:param num:
:return:
"""
for item in ['bytes', 'KB', 'MB', 'GB']:
if num < 1024.0:
return "%3.1f%s" % (num, item)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
def print_fs(host_fs):
"""
Prints the host file system volume info
:param host_fs:
:return:
"""
print "{}\t{}\t".format("Datastore: ", host_fs.volume.name)
print "{}\t{}\t".format("UUID: ", host_fs.volume.uuid)
print "{}\t{}\t".format("Capacity: ", sizeof_fmt(
host_fs.volume.capacity))
print "{}\t{}\t".format("VMFS Version: ", host_fs.volume.version)
print "{}\t{}\t".format("Is Local VMFS: ", host_fs.volume.local)
print "{}\t{}\t".format("SSD: ", host_fs.volume.ssd)
def main():
"""
Simple command-line program for listing all ESXi datastores and their
associated devices
"""
args = get_args()
cli.prompt_for_password(args)
try:
service_instance = connect.SmartConnectNoSSL(host=args.host,
user=args.user,
pwd=args.password,
port=int(args.port))
if not service_instance:
print("Could not connect to the specified host using specified "
"username and password")
return -1
atexit.register(connect.Disconnect, service_instance)
content = service_instance.RetrieveContent()
# Search for all ESXi hosts
objview = content.viewManager.CreateContainerView(content.rootFolder,
[vim.HostSystem],
True)
esxi_hosts = objview.view
objview.Destroy()
datastores = {}
for esxi_host in esxi_hosts:
if not args.json:
print "{}\t{}\t\n".format("ESXi Host: ", esxi_host.name)
# All Filesystems on ESXi host
storage_system = esxi_host.configManager.storageSystem
host_file_sys_vol_mount_info = \
storage_system.fileSystemVolumeInfo.mountInfo
datastore_dict = {}
# Map all filesystems
for host_mount_info in host_file_sys_vol_mount_info:
# Extract only VMFS volumes
if host_mount_info.volume.type == "VMFS":
extents = host_mount_info.volume.extent
if not args.json:
print_fs(host_mount_info)
else:
datastore_details = {
'uuid': host_mount_info.volume.uuid,
'capacity': host_mount_info.volume.capacity,
'vmfs_version': host_mount_info.volume.version,
'local': host_mount_info.volume.local,
'ssd': host_mount_info.volume.ssd
}
extent_arr = []
extent_count = 0
for extent in extents:
if not args.json:
print "{}\t{}\t".format(
"Extent[" + str(extent_count) + "]:",
extent.diskName)
extent_count += 1
else:
# create an array of the devices backing the given
# datastore
extent_arr.append(extent.diskName)
# add the extent array to the datastore info
datastore_details['extents'] = extent_arr
# associate datastore details with datastore name
datastore_dict[host_mount_info.volume.name] = \
datastore_details
if not args.json:
print
# associate ESXi host with the datastore it sees
datastores[esxi_host.name] = datastore_dict
if args.json:
print json.dumps(datastores)
except vmodl.MethodFault as error:
print "Caught vmodl fault : " + error.msg
return -1
return 0
# Start program
if __name__ == "__main__":
main()
```
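For reference, the `--json` output groups datastore details under each host name. An illustrative (entirely hypothetical) payload, shown here as the Python object produced by `json.loads()`:
```python
# Hypothetical result for a single host with one VMFS datastore:
{
    "esxi01.example.com": {
        "datastore1": {
            "uuid": "5f2a1b3c-00000000-0000-000000000000",
            "capacity": 991631900672,
            "vmfs_version": "6.81",
            "local": True,
            "ssd": False,
            "extents": ["naa.600508b1001c5e1a00000000000000ab"],
        }
    }
}
```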
#### File: pyvmomi-community-samples/samples/pyvmomi-to-suds.py
```python
import argparse
import cookielib
import getpass
import suds
# pyvmomi-to-suds.py
#
# Demonstrates how to move a session between the pyVmomi client and the
# generated SOAP suds client. We leverage the suds library's use of cookielib
# to manipulate its cookies to match the pyVmomi cookies. That causes vCenter
# to identify both clients as the same user.
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--host',
required=True,
action='store',
help='Remote host to connect to')
parser.add_argument('-u', '--user',
required=True,
action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password',
required=False,
action='store',
help='Password to use when connecting to host')
parser.add_argument('-o', '--port',
required=False,
action='store',
help="port to use, default 443", default=443)
args = parser.parse_args()
if args.password:
    password = args.password
else:
    password = getpass.getpass(
prompt='Enter password for host %s and user %s: ' %
(args.host, args.user))
url = "https://%s/sdk/vimService.wsdl" % args.host
client = suds.client.Client(url, location=url)
def get_current_session(client):
si = suds.sudsobject.Property("ServiceInstance")
si._type = "ServiceInstance"
sc = client.service.RetrieveServiceContent(si)
property_filter_spec = client.factory.create('ns0:PropertyFilterSpec')
property_spec = client.factory.create('ns0:PropertySpec')
property_spec.pathSet = ['currentSession']
property_spec.type = "SessionManager"
property_filter_spec.propSet = [property_spec]
object_spec = client.factory.create('ns0:ObjectSpec')
object_spec.obj = sc.sessionManager
object_spec.skip = False
property_filter_spec.objectSet = [object_spec]
options = client.factory.create('ns0:RetrieveOptions')
options.maxObjects = 1
results = client.service.RetrievePropertiesEx(sc.propertyCollector,
specSet=[
property_filter_spec],
options=options)
def get_property(self, name):
for obj in self.objects:
if not hasattr(obj, 'propSet'):
return None
for prop in obj.propSet:
if prop.name == name:
return prop.val
results.__class__.get_property = get_property
return results.get_property('currentSession')
print "pyVmomi login... "
import pyVim.connect as connect
si = connect.SmartConnectNoSSL(host=args.host,
user=args.user,
pwd=password,
port=int(args.port))
print "current session id: %s" % si.content.sessionManager.currentSession.key
pyvmomi_cookie = si._stub.cookie
print "current cookie contents: %s" % pyvmomi_cookie
VMWARE_COOKIE_NAME = 'vmware_soap_session'
def inject_vmware_cookie_suds(client, cookie_value, domain):
cookie = cookielib.Cookie(0,
VMWARE_COOKIE_NAME,
cookie_value,
None,
None,
domain,
None,
None,
"/",
None,
None,
None,
None,
None,
None,
None,
None,)
client.options.transport.cookiejar.set_cookie(cookie)
client.__class__.set_vmware_cookie = inject_vmware_cookie_suds
print "=" * 80
print "pyvmomi to suds"
si._stub.cookie = pyvmomi_cookie
# extracting the cookie value:
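# The pyVmomi cookie string typically looks like (value shown is made up):
#   vmware_soap_session="52ad3c7e-0000-0000-0000-000000000000"; Path=/; HttpOnly; Secure;
# so the slice below keeps everything between the first '=' and the first ';'.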
start_of_value = pyvmomi_cookie.index("=") + 1
end_of_value = pyvmomi_cookie.index(";")
cookie_value = pyvmomi_cookie[start_of_value:end_of_value]
session_id = si.content.sessionManager.currentSession.key
print "current pyVmomi session id: %s" % session_id
# injecting the cookie value:
client.set_vmware_cookie(cookie_value, args.host)
soap_session_id = get_current_session(client).key
print "current suds session id: %s" % soap_session_id
assert session_id == soap_session_id
``` |
{
"source": "jkeifer/arcpy-extensions",
"score": 3
} |
#### File: arcpy-extensions/arcpy_extensions/server_admin.py
```python
from __future__ import print_function
import os
import sys
import urllib
import urllib2
import httplib
import arcpy
import json
import re
from arcpy import env
class ServiceException(Exception):
pass
class AgsAdmin(object):
def __init__(self, server, port, token=None):
self.server = server
self.port = port
self.token = token
@classmethod
def connectWithToken(self, server, port, token):
return AgsAdmin(server, port, token)
@classmethod
def connectWithoutToken(self, server, port, adminUser, adminPass, expiration=60):
token = self.getToken(server, port, adminUser, adminPass, expiration=expiration)
return AgsAdmin(server, port, token)
@staticmethod
def getToken(server, port, adminUser, adminPass, expiration=60):
"""Get a token required for Admin changes"""
query_dict = {'username': adminUser,
                      'password': adminPass,
'expiration': str(expiration),
'client': 'requestip'}
query_string = urllib.urlencode(query_dict)
url = "http://{}:{}/arcgis/admin/generateToken".format(server, port)
token = json.loads(urllib.urlopen(url + "?f=json", query_string).read())
try:
return token["token"]
except KeyError:
raise ServiceException("No token returned. Check credientials.")
def stopStartDeleteService(self, command, servicename, folder=None):
"""
Function to stop, start or delete a service.
Requires token, server, and port.
command = Stop|Start|Delete
serviceList = List of services. A service must be in the <name>.<type> notation
"""
if folder:
if folder.endswith("/"):
pass
else:
folder = folder + "/"
else:
folder = ""
service = urllib.quote(servicename.encode('utf8'))
op_service_url = "http://{0}:{1}/arcgis/admin/services/{2}{3}/{4}?token={5}&f=json".format(self.server,
self.port,
folder,
service,
command,
self.token)
status = urllib2.urlopen(op_service_url, ' ').read()
        if 'success' not in status:
raise ServiceException("Could not {0} service {1} successfully.".format(command, servicename))
else:
return 0
def stopService(self, servicename):
return self.stopStartDeleteService("Stop", servicename, folder=None)
def startService(self, servicename):
return self.stopStartDeleteService("Start", servicename, folder=None)
def deleteService(self, servicename):
return self.stopStartDeleteService("Delete", servicename, folder=None)
def servicesInFolder(self, foldername, namefilter=None):
"""
"""
# test if name filter is valid regex
if namefilter:
try:
re.compile(namefilter)
except re.error:
raise re.error("Specified namefilter argument must be a vaild regex. Aborting.")
listofservices = []
folderURL = "/arcgis/admin/services/" + foldername
# This request only needs the token and the response formatting parameter
params = urllib.urlencode({'token': self.token, 'f': 'json'})
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
# Connect to URL and post parameters
httpConn = httplib.HTTPConnection(self.server, self.port)
httpConn.request("POST", folderURL, params, headers)
# Read response
response = httpConn.getresponse()
if (response.status != 200):
httpConn.close()
raise ServiceException("Could not read folder information.")
else:
data = response.read()
# Check that data returned is not an error object
if not assertJsonSuccess(data):
raise ServiceException("Error when reading folder information. " + str(data))
# Deserialize response into Python object
dataObj = json.loads(data)
httpConn.close()
for item in dataObj['services']:
# if namefilter, check to see if name matches; if not, skip to next item
if namefilter:
if not re.search(namefilter, item['serviceName']):
continue
listofservices.append(item['serviceName'] + "." + item['type'])
return listofservices
def stopStartDeleteAllServicesInFolder(self, command, foldername, namefilter=None):
"""
"""
errorcount = 0
listofservices = self.servicesInFolder(foldername, namefilter=namefilter)
if not listofservices:
raise ServiceException("No services were found in the folder {0}.".format(foldername))
for service in listofservices:
try:
self.stopStartDeleteService(command, service, foldername)
except ServiceException as e:
print(e)
print("Failed to {0} service {1}.".format(command.lower(), service))
errorcount += 1
return errorcount
def stopAllServicesInFolder(self, foldername, namefilter=None):
return self.stopStartDeleteAllServicesInFolder("Stop", foldername, namefilter=namefilter)
def startAllServicesInFolder(self, foldername, namefilter=None):
return self.stopStartDeleteAllServicesInFolder("Start", foldername, namefilter=namefilter)
def deleteAllServicesInFolder(self, foldername, namefilter=None):
return self.stopStartDeleteAllServicesInFolder("Delete", foldername, namefilter=namefilter)
# A function that checks that the input JSON object
# is not an error object.
def assertJsonSuccess(data):
obj = json.loads(data)
if 'status' in obj and obj['status'] == "error":
print("Error: JSON object returns an error. " + str(obj))
return False
else:
return True
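# Example usage (hypothetical server, port and credentials):
#   admin = AgsAdmin.connectWithoutToken("gis.example.com", 6080, "siteadmin", "s3cret")
#   admin.stopService("Roads.MapServer")
#   admin.startAllServicesInFolder("Hosted", namefilter=r"^prod_")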
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "jkeifer/boto3-utils",
"score": 3
} |
#### File: boto3-utils/boto3utils/secrets.py
```python
import base64
import boto3
import json
def get_secret(secret_name):
""" Get secrets as a dictionary from Secrets Manager """
# Create a Secrets Manager client
session = boto3.session.Session()
client = session.client(service_name='secretsmanager')
# Will throw a botocore.exceptions.ClientError for any of
# the specific exceptions for the 'GetSecretValue' API.
# See https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html
get_secret_value_response = client.get_secret_value(SecretId=secret_name)
# Decrypts secret using the associated KMS CMK.
# Depending on whether the secret is a string or binary, one of these fields will be populated.
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
else:
secret = base64.b64decode(get_secret_value_response['SecretBinary'])
return json.loads(secret)
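# Example usage (secret name is hypothetical; the stored secret must be JSON):
#   creds = get_secret("prod/db/credentials")
#   print(creds["username"])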
``` |
{
"source": "jkeiren/mCRL2",
"score": 2
} |
#### File: tests/python/tools.py
```python
from subprocess import PIPE
import os.path
import re
from text_utility import read_text
def is_list_of(l, types):
if not isinstance(l, list):
return False
for x in l:
if not isinstance(x, types):
return False
return True
class Node:
def __init__(self, label, type, value):
self.label = label
self.type = type
self.value = value
return
def __str__(self):
return 'Node(label = {0}, type = {1}, value = {2})'.format(self.label, self.type, self.value)
def filename(self):
return '{}.{}'.format(self.label, self.type)
class Tool(object):
def __init__(self, label, name, toolpath, input_nodes, output_nodes, args):
assert is_list_of(input_nodes, Node)
assert is_list_of(output_nodes, Node)
import platform
self.label = label
self.name = name
self.toolpath = toolpath
self.input_nodes = input_nodes
self.output_nodes = output_nodes
self.args = args
self.executed = False
self.value = {}
if platform.system() == 'Windows':
# Don't display the Windows GPF dialog if the invoked program dies.
# See comp.os.ms-windows.programmer.win32
# How to suppress crash notification dialog?, <NAME> Jan 14,2004 -
import ctypes
SEM_NOGPFAULTERRORBOX = 0x0002 # From MSDN
ctypes.windll.kernel32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
self.subprocess_flags = 0x8000000 #win32con.CREATE_NO_WINDOW?
else:
self.subprocess_flags = 0
# Raises an exception if the execution was aborted or produced an error
def check_execution(self, process, timeout, memlimit, returncode):
import platform
import popen
if process.user_time > timeout:
raise popen.TimeExceededError(process.user_time)
if process.max_virtual_memory > memlimit:
raise popen.MemoryExceededError(process.max_virtual_memory)
        # Recognize platform-specific failure codes before the generic
        # non-zero check, otherwise these branches are unreachable.
        if platform.system() == 'Windows' and returncode == -1073740777:
            raise popen.ToolRuntimeError('Tool {} failed with the return code STATUS_INVALID_CRUNTIME_PARAMETER (0xC0000417)'.format(self.name))
        if platform.system() == 'Windows' and returncode == -1073741571:
            raise popen.StackOverflowError(self.name)
        if platform.system() == 'Linux' and returncode == -11:
            raise popen.SegmentationFault(self.name)
        if returncode != 0:
            print(self.stderr)
            raise popen.ToolRuntimeError('Tool {} ended with return code {}'.format(self.name, returncode))
if self.stderr and 'error' in self.stderr:
raise popen.ToolRuntimeError('Tool {} failed: {}'.format(self.name, self.stderr))
# If no_paths is True, then all paths in the command are excluded
def arguments(self, working_directory = None, no_paths = False):
if not working_directory:
working_directory = os.getcwd()
input_filenames = [node.filename() for node in self.input_nodes]
output_filenames = [node.filename() for node in self.output_nodes if node.type != 'Bool']
filenames = input_filenames + output_filenames
if not no_paths:
filenames = [os.path.join(working_directory, filename) for filename in filenames]
return filenames
def assign_outputs(self):
for node in self.output_nodes:
if node.type == 'text':
text = read_text(node.filename())
node.value = text
else:
node.value = 'executed'
# value[key] is an integer
def parse_number(self, text, key, regex):
m = re.search(regex, text)
if m != None:
self.value[key] = int(m.group(1))
# value[key] is an integer
def parse_numbers(self, text, key1, key2, regex):
m = re.search(regex, text)
if m != None:
self.value[key1] = int(m.group(1))
self.value[key2] = int(m.group(2))
# value[key] is a set of strings
# All occurrences of regex are processed
def parse_action(self, text, key, regex):
for m in re.finditer(regex, text):
if not key in self.value:
self.value[key] = set([])
self.value[key].add(m.group(1))
# value[key] is a boolean
# multiple regular expressions are checked
def parse_boolean_regexes(self, text, key, regexes):
result = False
for regex in regexes:
if re.search(regex, text, re.DOTALL) != None:
result = True
self.value[key] = result
# value[key] is a boolean
def parse_boolean(self, text, key, regex, negated_regex = None):
if negated_regex:
m = re.search(negated_regex, text, re.DOTALL)
if m != None:
self.value[key] = False
if regex:
m = re.search(regex, text, re.DOTALL)
if m != None:
self.value[key] = True
def parse_output(self):
text = self.stdout + self.stderr
self.parse_number(text, 'summand-count' , r'Number of summands : (\d+)')
self.parse_number(text, 'tau-summand-count' , r'Number of tau-summands : (\d+)')
self.parse_number(text, 'global-variable-count' , r'Number of declared global variables : (\d+)')
self.parse_number(text, 'process-parameter-count' , r'Number of process parameters : (\d+)')
self.parse_number(text, 'action-label-count' , r'Number of declared action labels : (\d+)')
self.parse_number(text, 'used-action-label-count' , r'Number of used actions : (\d+)')
self.parse_number(text, 'used-multi-action-count' , r'Number of used multi-actions : (\d+)')
self.parse_number(text, 'state-count' , r'Number of states: (\d+)')
self.parse_number(text, 'state-label-count' , r'Number of state labels: (\d+)')
self.parse_number(text, 'action-label-count' , r'Number of action labels: (\d+)')
self.parse_number(text, 'transition-count' , r'Number of transitions: (\d+)')
self.parse_number(text, 'equation-count' , r'Number of equations: (\d+)')
self.parse_number(text, 'mu-count' , r"Number of mu's: (\d+)")
self.parse_number(text, 'nu-count' , r"Number of nu's: (\d+)")
self.parse_number(text, 'block-nesting-depth' , r'Block nesting depth: (\d+)')
self.parse_number(text, 'vertex-count' , r'Number of vertices in the structure graph: (\d+)')
self.parse_numbers(text, 'confluent-tau-summand-count', 'tau-summand-count', r'(\d+) of (\d+) tau summands were found to be confluent')
self.parse_boolean(text, 'has-state-labels' , 'Has state labels.', 'Does not have state labels.')
self.parse_boolean(text, 'has-action-labels' , 'Has action labels.')
self.parse_boolean(text, 'is-deterministic' , 'LTS is deterministic.', 'LTS is not deterministic.')
self.parse_boolean(text, 'is-closed' , 'is closed', 'is not closed')
self.parse_boolean(text, 'is-well-formed' , 'well formed', 'not well formed')
self.parse_boolean(text, 'is-well-typed' , 'is well typed', 'is not well typed')
self.parse_boolean(text, 'result' , r'LTSs are strongly bisimilar', 'LTSs are not strongly bisimilar')
self.parse_boolean(text, 'result' , r'LTSs are branching bisimilar', 'LTSs are not branching bisimilar')
self.parse_boolean(text, 'result' , r'LTSs are equal \(branching bisimilarity using the almost-O\(m log n\) Groote/Wijs algorithm\)', r'LTSs are not equal \(branching bisimilarity using the almost-O\(m log n\) Groote/Wijs algorithm\)')
self.parse_boolean(text, 'result' , r'LTSs are equal \(branching bisimilarity using the O\(m log n\) Groote/Keiren/Jansen/Wijs algorithm\)', r'LTSs are not equal \(branching bisimilarity using the O\(m log n\) Groote/Keiren/Jansen/Wijs algorithm\)')
self.parse_boolean(text, 'result' , r'LTSs are divergence preserving branching bisimilar', 'LTSs are not divergence preserving branching bisimilar')
self.parse_boolean(text, 'result' , r'LTSs are weak bisimilar', 'LTSs are not weak bisimilar')
self.parse_boolean(text, 'result' , r'LTSs are divergence preserving weak bisimilar', 'LTSs are not divergence preserving weak bisimilar')
self.parse_boolean(text, 'result' , r'LTSs are strongly simulation equivalent', 'LTSs are not strongly simulation equivalent')
self.parse_boolean(text, 'result' , r'LTSs are strongly trace equivalent', 'LTSs are not strongly trace equivalent')
self.parse_boolean(text, 'result' , r'LTSs are weak trace equivalent', 'LTSs are not weak trace equivalent')
self.parse_boolean(text, 'result' , r'LTSs are equal', 'LTSs are not equal')
self.parse_boolean(text, 'result' , r'is included in', 'is not included in')
self.parse_action(text, 'actions' , r"Detected action '(\w+)'")
self.parse_action(text, 'actions' , r"Action '(\w+)' found")
self.parse_boolean_regexes(text, 'has-deadlock' , [r'deadlock-detect: deadlock found', r'Deadlock found'])
self.parse_boolean_regexes(text, 'has-divergence' , [r'divergence-detect: divergence found', r'Divergent state found'])
self.parse_boolean(text, 'has-nondeterminism' , r'Nondeterministic state found')
# If no_paths is True, then all paths in the command are excluded
def command(self, working_directory = None, no_paths = False):
args = self.arguments(working_directory, no_paths)
name = self.name
if not no_paths:
name = os.path.join(self.toolpath, name)
return ' '.join([name] + args + self.args)
def check_exists(self, name):
import platform
if os.path.exists(name):
return True
if not name.endswith('.exe') and platform.system() == 'Windows':
if os.path.exists(name + '.exe'):
return True
return False
def execute(self, timeout, memlimit, verbose):
import popen
args = self.arguments()
name = os.path.join(self.toolpath, self.name)
if verbose:
print('Executing ' + ' '.join([name] + args + self.args))
if not self.check_exists(name):
raise popen.ToolNotFoundError(name)
process = popen.Popen([name] + args + self.args, stdout=PIPE, stdin=PIPE, stderr=PIPE, creationflags=self.subprocess_flags, maxVirtLimit=memlimit, usrTimeLimit=timeout)
input = None
stdout, stderr = process.communicate(input)
self.stdout = stdout.decode("utf-8")
self.stderr = stderr.decode("utf-8")
self.executed = True
self.user_time = process.user_time
self.max_virtual_memory = process.max_virtual_memory
self.check_execution(process, timeout, memlimit, process.returncode)
self.assign_outputs()
self.parse_output()
return process.returncode
def __str__(self):
import io
out = io.StringIO()
out.write('label = ' + str(self.label) + '\n')
out.write('name = ' + str(self.name) + '\n')
out.write('input = [{0}]\n'.format(', '.join([str(x) for x in self.input_nodes])))
out.write('output = [{0}]\n'.format(', '.join([str(x) for x in self.output_nodes])))
out.write('args = ' + str(self.args) + '\n')
out.write('stderr = ' + str(self.stderr) + '\n')
out.write('executed = ' + str(self.executed) + '\n')
return out.getvalue()
class SolveTool(Tool):
def __init__(self, label, name, toolpath, input_nodes, output_nodes, args):
super(SolveTool, self).__init__(label, name, toolpath, input_nodes, output_nodes, args)
def assign_outputs(self):
text = self.stdout.strip() + self.stderr.strip()
if text.endswith('true'):
value = True
elif text.endswith('false'):
value = False
else:
value = None
self.value['solution'] = value
class Lps2PbesTool(Tool):
def __init__(self, label, name, toolpath, input_nodes, output_nodes, args):
assert len(input_nodes) == 2
assert len(output_nodes) == 1
super(Lps2PbesTool, self).__init__(label, name, toolpath, input_nodes, output_nodes, args)
def arguments(self, working_directory = None, no_paths = False):
args = super(Lps2PbesTool, self).arguments(working_directory, no_paths)
args.insert(1, '-f')
return args
class Lts2PbesTool(Tool):
def __init__(self, label, name, toolpath, input_nodes, output_nodes, args):
assert len(input_nodes) == 2
assert len(output_nodes) == 1
super(Lts2PbesTool, self).__init__(label, name, toolpath, input_nodes, output_nodes, args)
def arguments(self, working_directory = None, no_paths = False):
args = super(Lts2PbesTool, self).arguments(working_directory, no_paths)
args.insert(1, '-f')
return args
class Lts2LpsTool(Tool):
def __init__(self, label, name, toolpath, input_nodes, output_nodes, args):
assert len(input_nodes) == 2
assert len(output_nodes) == 1
super(Lts2LpsTool, self).__init__(label, name, toolpath, input_nodes, output_nodes, args)
def arguments(self, working_directory = None, no_paths = False):
args = super(Lts2LpsTool, self).arguments(working_directory, no_paths)
args.insert(1, '-l')
return args
class Lps2LtsDeprecatedTool(Tool):
def __init__(self, label, name, toolpath, input_nodes, output_nodes, args):
super(Lps2LtsDeprecatedTool, self).__init__(label, name, toolpath, input_nodes, output_nodes, args)
def assign_outputs(self):
self.value['has-deadlock'] = None
self.value['has-nondeterminism'] = None
self.value['has-divergence'] = None
self.value['actions'] = set([])
super(Lps2LtsDeprecatedTool, self).assign_outputs()
class Lps2LtsTool(Tool):
def __init__(self, label, name, toolpath, input_nodes, output_nodes, args):
super(Lps2LtsTool, self).__init__(label, name, toolpath, input_nodes, output_nodes, args)
def assign_outputs(self):
self.value['has-deadlock'] = None
self.value['has-nondeterminism'] = None
self.value['has-divergence'] = None
self.value['actions'] = set([])
super(Lps2LtsTool, self).assign_outputs()
class PbesSolveTool(Tool):
def __init__(self, label, name, toolpath, input_nodes, output_nodes, args):
super(PbesSolveTool, self).__init__(label, name, toolpath, input_nodes, output_nodes, args)
def arguments(self, working_directory = None, no_paths = False):
args = super(PbesSolveTool, self).arguments(working_directory, no_paths)
# counter example generation
if len(self.input_nodes) > 1:
args[1] = '--file={}'.format(args[1])
if len(self.output_nodes) > 0:
args[2] = '--evidence-file={}'.format(args[2])
return args
def assign_outputs(self):
text = self.stdout.strip() + self.stderr.strip()
# N.B. In verbose mode, the solution may appear at the start.
if text.startswith('true') or text.endswith('true'):
value = True
elif text.startswith('false') or text.endswith('false'):
value = False
else:
value = None
self.value['solution'] = value
# mark the evidence file as executed
if len(self.output_nodes) == 1:
self.output_nodes[0].value = 'executed'
class ToolFactory(object):
def create_tool(self, label, name, toolpath, input_nodes, output_nodes, args):
if name == 'lps2pbes':
return Lps2PbesTool(label, name, toolpath, input_nodes, output_nodes, args)
elif name == 'lts2pbes':
return Lts2PbesTool(label, name, toolpath, input_nodes, output_nodes, args)
elif name in ['generatelts', 'lps2lts']:
return Lps2LtsTool(label, name, toolpath, input_nodes, output_nodes, args)
elif name == 'lps2ltsdeprecated':
return Lps2LtsDeprecatedTool(label, name, toolpath, input_nodes, output_nodes, args)
elif name == 'lts2lps':
return Lts2LpsTool(label, name, toolpath, input_nodes, output_nodes, args)
elif name in ['pbespgsolve', 'bessolve']:
return SolveTool(label, name, toolpath, input_nodes, output_nodes, args)
elif name in ['pbes2bool', 'pbessolve', 'pbessolvesymbolic', 'pbessymbolicbisim']:
return PbesSolveTool(label, name, toolpath, input_nodes, output_nodes, args)
return Tool(label, name, toolpath, input_nodes, output_nodes, args)
```
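As a rough illustration of how `Node`, `Tool` and `ToolFactory` fit together (the tool path, labels and the extra `--verbose` flag below are hypothetical, and `tests/python` is assumed to be on `sys.path`):
```python
from tools import Node, ToolFactory

# Two input nodes (an LPS and a modal formula) and one output node (a PBES).
lps = Node('l1', 'lps', None)
mcf = Node('f1', 'mcf', None)
pbes = Node('p1', 'pbes', None)

# 'lps2pbes' is mapped to Lps2PbesTool, which injects the '-f' flag for the formula.
tool = ToolFactory().create_tool('t1', 'lps2pbes', '/opt/mcrl2/bin',
                                 [lps, mcf], [pbes], ['--verbose'])
print(tool.command(no_paths=True))  # lps2pbes l1.lps -f f1.mcf p1.pbes --verbose
```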
#### File: tests/random/random_testing.py
```python
import os
import os.path
import random
import re
import sys
import traceback
sys.path += [os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'python'))]
import random_state_formula_generator
from random_bes_generator import make_bes
from random_pbes_generator import make_pbes
import random_process_expression
from testing import YmlTest
from text_utility import write_text
MCRL2_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
MCRL2_INSTALL_DIR = os.path.join(MCRL2_ROOT, 'install', 'bin')
def ymlfile(file):
return '{}/tests/specifications/{}.yml'.format(MCRL2_ROOT, file)
def mcrl2file(file):
return os.path.join(MCRL2_ROOT, file)
class RandomTest(YmlTest):
def __init__(self, name, ymlfile, settings):
super(RandomTest, self).__init__(name, ymlfile, [], settings)
# create input files for the random test, and add the filenames to self.inputfiles
def create_inputfiles(self, runpath = '.'):
raise NotImplementedError
# removes input files that are in the runpath directory
def remove_inputfiles(self, runpath = '.'):
for filename in self.inputfiles:
if os.path.abspath(runpath) == os.path.abspath(os.path.dirname(filename)):
os.remove(filename)
def execute(self, runpath = '.'):
self.create_inputfiles(runpath)
super(RandomTest, self).execute(runpath)
self.remove_inputfiles(runpath)
class ProcessTest(RandomTest):
def __init__(self, name, ymlfile, settings):
super(ProcessTest, self).__init__(name, ymlfile, settings)
self.actions = ['a', 'b', 'c', 'd']
self.process_identifiers = ['P', 'Q', 'R']
self.process_size = 13
self.parallel_operator_generators = random_process_expression.default_parallel_operator_generators
self.process_expression_generators = random_process_expression.default_process_expression_generators
self.init = None
self.generate_process_parameters = False
def create_inputfiles(self, runpath = '.'):
filename = '{0}.mcrl2'.format(self.name, self.settings)
p = random_process_expression.make_process_specification(self.parallel_operator_generators, self.process_expression_generators, self.actions, self.process_identifiers, self.process_size, self.init, self.generate_process_parameters)
write_text(filename, str(p))
self.inputfiles += [filename]
# generates stochastic random processes
class StochasticProcessTest(ProcessTest):
def __init__(self, name, ymlfile, settings):
super(StochasticProcessTest, self).__init__(name, ymlfile, settings)
self.process_expression_generators = {
random_process_expression.make_action : 8,
random_process_expression.make_delta : 1,
random_process_expression.make_tau : 1,
random_process_expression.make_process_instance: 2,
random_process_expression.make_sum : 2,
random_process_expression.make_if_then : 2,
random_process_expression.make_if_then_else : 2,
random_process_expression.make_choice : 5,
random_process_expression.make_seq : 5,
random_process_expression.make_multi_action : 1,
random_process_expression.make_dist : 3,
}
# generates random process with higher probability of tau transitions
class ProcessTauTest(ProcessTest):
def __init__(self, name, testfile, settings):
super(ProcessTauTest, self).__init__(name, testfile, settings)
self.actions = ['a', 'b', 'c']
self.init = 'hide({a}, allow({a, b, c}, P || Q || R))'
self.process_expression_generators = {
random_process_expression.make_action: 8,
random_process_expression.make_delta: 1,
random_process_expression.make_tau: 4,
random_process_expression.make_process_instance: 1,
random_process_expression.make_sum: 0,
random_process_expression.make_if_then: 0,
random_process_expression.make_if_then_else: 0,
random_process_expression.make_choice: 5,
random_process_expression.make_seq: 5,
random_process_expression.make_multi_action: 1,
random_process_expression.make_dist: 0,
}
class AlphabetReduceTest(ProcessTest):
def __init__(self, name, settings):
super(AlphabetReduceTest, self).__init__(name, ymlfile('alphabet-reduce'), settings)
self.actions = ['a', 'b', 'c', 'd', 'e']
class LpsSuminstTest(ProcessTest):
def __init__(self, name, settings):
super(LpsSuminstTest, self).__init__(name, ymlfile('lpssuminst'), settings)
class LpsSumelmTest(ProcessTest):
def __init__(self, name, settings):
super(LpsSumelmTest, self).__init__(name, ymlfile('lpssumelm'), settings)
class LpsParelmTest(ProcessTest):
def __init__(self, name, settings):
super(LpsParelmTest, self).__init__(name, ymlfile('lpsparelm'), settings)
self.generate_process_parameters = True
class LpsOnePointRuleRewriteTest(ProcessTest):
def __init__(self, name, settings):
super(LpsOnePointRuleRewriteTest, self).__init__(name, ymlfile('lpstransform'), settings)
self.add_command_line_options('t2', ['-alps-one-point-rule-rewriter'])
class LpsConfcheckTest(ProcessTauTest):
def __init__(self, name, confluence_type, settings):
self.option_map = { 'commutative' : 'C',
'commutative-disjoint' : 'c',
'disjoint' : 'd',
'triangular' : 'T',
'trivial' : 'Z'
}
assert confluence_type in self.option_map
super(LpsConfcheckTest, self).__init__(name, ymlfile('lpsconfcheck'), settings)
self.add_command_line_options('t2', ['-x' + self.option_map[confluence_type]])
class LtscompareTest(ProcessTauTest):
def __init__(self, name, equivalence_type, settings):
assert equivalence_type in ['bisim', 'bisim-gv', 'bisim-gjkw', 'branching-bisim', 'branching-bisim-gv', 'branching-bisim-gjkw', 'dpbranching-bisim', 'dpbranching-bisim-gv', 'dpbranching-bisim-gjkw', 'weak-bisim', 'dpweak-bisim', 'sim', 'ready-sim' , 'trace', 'weak-trace']
super(LtscompareTest, self).__init__(name, ymlfile('ltscompare'), settings)
self.add_command_line_options('t3', ['-e' + equivalence_type])
self.add_command_line_options('t4', ['-e' + equivalence_type])
class StochasticLtscompareTest(StochasticProcessTest):
def __init__(self, name, settings):
super(StochasticLtscompareTest, self).__init__(name, ymlfile('stochastic-ltscompare'), settings)
class BisimulationTest(ProcessTauTest):
def __init__(self, name, equivalence_type, settings):
assert equivalence_type in ['bisim', 'bisim-gv', 'bisim-gjkw', 'branching-bisim', 'branching-bisim-gv', 'branching-bisim-gjkw', 'weak-bisim']
super(BisimulationTest, self).__init__(name, ymlfile('bisimulation'), settings)
self.add_command_line_options('t3', ['-e' + equivalence_type])
self.add_command_line_options('t4', ['-e' + equivalence_type])
if equivalence_type in ['branching-bisim-gv', 'branching-bisim-gjkw']:
self.add_command_line_options('t7', ['-bbranching-bisim'])
elif equivalence_type in ['bisim', 'bisim-gv', 'bisim-gjkw']:
self.add_command_line_options('t7', ['-bstrong-bisim'])
else:
self.add_command_line_options('t7', ['-b' + equivalence_type])
class Lps2ltsAlgorithmsTest(ProcessTauTest):
def __init__(self, name, settings):
super(Lps2ltsAlgorithmsTest, self).__init__(name, ymlfile('lps2lts-algorithms'), settings)
# randomly choose an algorithm
actions = random.choice(['a', 'a,b', 'a,b,c'])
options = [random.choice(['--deadlock', '--divergence', '--nondeterminism', '--action={}'.format(actions)])]
options = [random.choice(['--deadlock', '--nondeterminism', '--action={}'.format(actions)])]
if 'divergence' in options[0]:
tau_actions = random.choice(['', '', 'b', 'b,c'])
if tau_actions:
options.append('--tau={}'.format(tau_actions))
self.add_command_line_options('t2', options)
self.add_command_line_options('t3', options)
class LpsConstelmTest(ProcessTest):
def __init__(self, name, settings):
super(LpsConstelmTest, self).__init__(name, ymlfile('lpsconstelm'), settings)
self.generate_process_parameters = True
class LpsBinaryTest(ProcessTest):
def __init__(self, name, settings):
super(LpsBinaryTest, self).__init__(name, ymlfile('lpsbinary'), settings)
self.generate_process_parameters = True
class LpsstategraphTest(ProcessTest):
def __init__(self, name, settings):
super(LpsstategraphTest, self).__init__(name, ymlfile('lpsstategraph'), settings)
self.generate_process_parameters = True
class Lps2pbesTest(ProcessTest):
def __init__(self, name, settings):
super(Lps2pbesTest, self).__init__(name, ymlfile('lps2pbes'), settings)
def create_inputfiles(self, runpath = '.'):
super(Lps2pbesTest, self).create_inputfiles(runpath)
self.inputfiles.append(mcrl2file('examples/modal-formulas/nodeadlock.mcf'))
class Lts2pbesTest(ProcessTest):
def __init__(self, name, settings):
super(Lts2pbesTest, self).__init__(name, ymlfile('lts2pbes'), settings)
def create_inputfiles(self, runpath = '.'):
super(Lts2pbesTest, self).create_inputfiles(runpath)
self.inputfiles.append(mcrl2file('examples/modal-formulas/nodeadlock.mcf'))
class PbesTest(RandomTest):
def __init__(self, name, ymlfile, settings):
super(PbesTest, self).__init__(name, ymlfile, settings)
self.equation_count = 4
self.atom_count = 4
self.propvar_count = 3
self.use_quantifiers = True
self.use_integers = True
def create_inputfiles(self, runpath = '.'):
filename = '{0}.txt'.format(self.name)
p = make_pbes(self.equation_count, self.atom_count, self.propvar_count, self.use_quantifiers, use_integers=self.use_integers)
write_text(filename, str(p))
self.inputfiles += [filename]
# N.B. does not work yet due to unusable abstraction map
class PbesabsintheTest(PbesTest):
def __init__(self, name, settings):
super(PbesabsintheTest, self).__init__(name, ymlfile('pbesabsinthe'), settings)
# N.B. This test has been disabled, since the tool has been deprecated.
class PbesabstractTest(PbesTest):
def __init__(self, name, settings):
super(PbesabstractTest, self).__init__(name, ymlfile('pbesabstract'), settings)
class PbesbddsolveTest(PbesTest):
def __init__(self, name, settings):
super(PbesbddsolveTest, self).__init__(name, ymlfile('pbesbddsolve'), settings)
self.use_integers = False
self.use_quantifiers = False
class PbesconstelmTest(PbesTest):
def __init__(self, name, settings):
super(PbesconstelmTest, self).__init__(name, ymlfile('pbesconstelm'), settings)
class PbesparelmTest(PbesTest):
def __init__(self, name, settings):
super(PbesparelmTest, self).__init__(name, ymlfile('pbesparelm'), settings)
class PbespareqelmTest(PbesTest):
def __init__(self, name, settings):
super(PbespareqelmTest, self).__init__(name, ymlfile('pbespareqelm'), settings)
class Pbespor1Test(PbesTest):
def __init__(self, name, settings):
super(Pbespor1Test, self).__init__(name, ymlfile('pbespor1'), settings)
class Pbespor2Test(ProcessTest):
def __init__(self, name, settings):
super(Pbespor2Test, self).__init__(name, ymlfile('pbespor2'), settings)
def create_inputfiles(self, runpath = '.'):
super(Pbespor2Test, self).create_inputfiles(runpath)
filename = '{0}.mcf'.format(self.name, self.settings)
formula = random_state_formula_generator.make_modal_formula()
write_text(filename, str(formula))
self.inputfiles += [filename]
class PbesrewrTest(PbesTest):
def __init__(self, name, rewriter, settings):
super(PbesrewrTest, self).__init__(name, ymlfile('pbesrewr'), settings)
self.add_command_line_options('t2', ['-p' + rewriter])
class PbestransformTest(PbesTest):
def __init__(self, name, rewriter, settings):
super(PbestransformTest, self).__init__(name, ymlfile('pbestransform'), settings)
self.add_command_line_options('t2', ['-a' + rewriter])
class PbesinstTest(PbesTest):
def __init__(self, name, options, settings):
super(PbesinstTest, self).__init__(name, ymlfile('pbesinst'), settings)
self.add_command_line_options('t2', options)
class PbespgsolveTest(PbesTest):
def __init__(self, name, settings):
super(PbespgsolveTest, self).__init__(name, ymlfile('pbespgsolve'), settings)
class PbesstategraphTest(PbesTest):
def __init__(self, name, settings):
super(PbesstategraphTest, self).__init__(name, ymlfile('pbesstategraph'), settings)
class PbessymbolicbisimTest(PbesTest):
def __init__(self, name, settings):
super(PbessymbolicbisimTest, self).__init__(name, ymlfile('pbessymbolicbisim'), settings)
class PbessolvesymbolicTest(PbesTest):
def __init__(self, name, settings):
super(PbessolvesymbolicTest, self).__init__(name, ymlfile('pbessolvesymbolic'), settings)
class Pbes2boolTest(PbesTest):
def __init__(self, name, settings):
super(Pbes2boolTest, self).__init__(name, ymlfile('pbessolve'), settings)
class Pbes2boolDepthFirstTest(PbesTest):
def __init__(self, name, settings):
super(Pbes2boolDepthFirstTest, self).__init__(name, ymlfile('pbessolve'), settings)
self.add_command_line_options('t2', ['-zdepth-first'])
self.add_command_line_options('t3', ['-zdepth-first'])
self.add_command_line_options('t4', ['-zdepth-first'])
self.add_command_line_options('t5', ['-zdepth-first'])
self.add_command_line_options('t6', ['-zdepth-first'])
self.add_command_line_options('t7', ['-zdepth-first'])
self.add_command_line_options('t8', ['-zdepth-first'])
class Pbes2bool_counter_exampleTest(ProcessTest):
def __init__(self, name, optimization, settings):
super(Pbes2bool_counter_exampleTest, self).__init__(name, ymlfile('pbessolve-counter-example'), settings)
if optimization in [4, 5]:
self.add_command_line_options('t3', ['-l{}'.format(optimization), '--aggressive', '--prune-todo-list'])
else:
self.add_command_line_options('t3', ['-l{}'.format(optimization), '--prune-todo-list'])
def create_inputfiles(self, runpath = '.'):
super(Pbes2bool_counter_exampleTest, self).create_inputfiles(runpath)
filename = '{0}.mcf'.format(self.name, self.settings)
formula = random_state_formula_generator.make_modal_formula()
write_text(filename, str(formula))
self.inputfiles += [filename]
class Pbes_unify_parametersTest(PbesTest):
def __init__(self, name, settings):
super(Pbes_unify_parametersTest, self).__init__(name, ymlfile('pbes-unify-parameters'), settings)
class Pbes_srfTest(PbesTest):
def __init__(self, name, settings):
super(Pbes_srfTest, self).__init__(name, ymlfile('pbes-srf'), settings)
# N.B. does not work due to unknown expressions (F_or)
class SymbolicExplorationTest(PbesTest):
def __init__(self, name, settings):
super(SymbolicExplorationTest, self).__init__(name, ymlfile('symbolic_exploration'), settings)
class BesTest(RandomTest):
def __init__(self, name, ymlfile, settings):
super(BesTest, self).__init__(name, ymlfile, settings)
self.equation_count = 4
self.term_size = 3
def create_inputfiles(self, runpath = '.'):
filename = '{0}.txt'.format(self.name, self.settings)
p = make_bes(self.equation_count, self.term_size)
write_text(filename, str(p))
self.inputfiles += [filename]
class BessolveTest(BesTest):
def __init__(self, name, settings):
super(BessolveTest, self).__init__(name, ymlfile('bessolve'), settings)
available_tests = {
'alphabet-reduce' : lambda name, settings: AlphabetReduceTest(name, settings) ,
'lpssuminst' : lambda name, settings: LpsSuminstTest(name, settings) ,
'lpssumelm' : lambda name, settings: LpsSumelmTest(name, settings) ,
'lpsparelm' : lambda name, settings: LpsParelmTest(name, settings) ,
'lps-quantifier-one-point' : lambda name, settings: LpsOnePointRuleRewriteTest(name, settings) ,
'lpsconfcheck-commutative' : lambda name, settings: LpsConfcheckTest(name, 'commutative', settings) ,
'lpsconfcheck-commutative-disjoint' : lambda name, settings: LpsConfcheckTest(name, 'commutative-disjoint', settings) ,
'lpsconfcheck-disjoint' : lambda name, settings: LpsConfcheckTest(name, 'disjoint', settings) ,
'lpsconfcheck-triangular' : lambda name, settings: LpsConfcheckTest(name, 'triangular', settings) ,
'lpsconfcheck-trivial' : lambda name, settings: LpsConfcheckTest(name, 'trivial', settings) ,
'lpsconstelm' : lambda name, settings: LpsConstelmTest(name, settings) ,
'lpsbinary' : lambda name, settings: LpsBinaryTest(name, settings) ,
'lps2lts-algorithms' : lambda name, settings: Lps2ltsAlgorithmsTest(name, settings) ,
'lps2pbes' : lambda name, settings: Lps2pbesTest(name, settings) ,
'lpsstategraph' : lambda name, settings: LpsstategraphTest(name, settings) ,
'lts2pbes' : lambda name, settings: Lts2pbesTest(name, settings) ,
'ltscompare-bisim' : lambda name, settings: LtscompareTest(name, 'bisim', settings) ,
'ltscompare-bisim-gv' : lambda name, settings: LtscompareTest(name, 'bisim-gv', settings) ,
'ltscompare-bisim-gjkw' : lambda name, settings: LtscompareTest(name, 'bisim-gjkw', settings) ,
'ltscompare-branching-bisim' : lambda name, settings: LtscompareTest(name, 'branching-bisim', settings) ,
'ltscompare-branching-bisim-gv' : lambda name, settings: LtscompareTest(name, 'branching-bisim-gv', settings) ,
'ltscompare-branching-bisim-gjkw' : lambda name, settings: LtscompareTest(name, 'branching-bisim-gjkw', settings) ,
'ltscompare-dpbranching-bisim' : lambda name, settings: LtscompareTest(name, 'dpbranching-bisim', settings) ,
'ltscompare-dpbranching-bisim-gv' : lambda name, settings: LtscompareTest(name, 'dpbranching-bisim-gv', settings) ,
'ltscompare-dpbranching-bisim-gjkw' : lambda name, settings: LtscompareTest(name, 'dpbranching-bisim-gjkw', settings) ,
'ltscompare-weak-bisim' : lambda name, settings: LtscompareTest(name, 'weak-bisim', settings) ,
'ltscompare-dpweak-bisim' : lambda name, settings: LtscompareTest(name, 'dpweak-bisim', settings) ,
'ltscompare-sim' : lambda name, settings: LtscompareTest(name, 'sim', settings) ,
'ltscompare-ready-sim' : lambda name, settings: LtscompareTest(name, 'ready-sim', settings) ,
'ltscompare-trace' : lambda name, settings: LtscompareTest(name, 'trace', settings) ,
'ltscompare-weak-trace' : lambda name, settings: LtscompareTest(name, 'weak-trace', settings) ,
'bisimulation-bisim' : lambda name, settings: BisimulationTest(name, 'bisim', settings) ,
'bisimulation-bisim-gv' : lambda name, settings: BisimulationTest(name, 'bisim-gv', settings) ,
'bisimulation-bisim-gjkw' : lambda name, settings: BisimulationTest(name, 'bisim-gjkw', settings) ,
'bisimulation-branching-bisim' : lambda name, settings: BisimulationTest(name, 'branching-bisim', settings) ,
'bisimulation-branching-bisim-gv' : lambda name, settings: BisimulationTest(name, 'branching-bisim-gv', settings) ,
'bisimulation-branching-bisim-gjkw' : lambda name, settings: BisimulationTest(name, 'branching-bisim-gjkw', settings) ,
'bisimulation-weak-bisim' : lambda name, settings: BisimulationTest(name, 'weak-bisim', settings) ,
'pbesconstelm' : lambda name, settings: PbesconstelmTest(name, settings) ,
'pbesparelm' : lambda name, settings: PbesparelmTest(name, settings) ,
'pbespareqelm' : lambda name, settings: PbespareqelmTest(name, settings) ,
'pbespor2' : lambda name, settings: Pbespor2Test(name, settings) ,
'pbesrewr-simplify' : lambda name, settings: PbesrewrTest(name, 'simplify', settings) ,
'pbesrewr-pfnf' : lambda name, settings: PbesrewrTest(name, 'pfnf', settings) ,
'pbesrewr-quantifier-all' : lambda name, settings: PbesrewrTest(name, 'quantifier-all', settings) ,
'pbesrewr-quantifier-finite' : lambda name, settings: PbesrewrTest(name, 'quantifier-finite', settings) ,
'pbesrewr-quantifier-inside' : lambda name, settings: PbesrewrTest(name, 'quantifier-inside', settings) ,
'pbesrewr-quantifier-one-point' : lambda name, settings: PbesrewrTest(name, 'quantifier-one-point', settings) ,
'pbesrewr-data-rewriter' : lambda name, settings: PbestransformTest(name, 'pbes-data-rewriter', settings) ,
'pbesrewr-simplify-rewriter' : lambda name, settings: PbestransformTest(name, 'pbes-simplify-rewriter', settings) ,
'pbesrewr-simplify-data-rewriter' : lambda name, settings: PbestransformTest(name, 'pbes-simplify-data-rewriter', settings) ,
'pbesrewr-simplify-quantifiers-rewriter' : lambda name, settings: PbestransformTest(name, 'pbes-simplify-quantifiers-rewriter', settings) ,
'pbesrewr-simplify-quantifiers-data-rewriter' : lambda name, settings: PbestransformTest(name, 'pbes-simplify-quantifiers-data-rewriter', settings),
'pbesinst-lazy' : lambda name, settings: PbesinstTest(name, ['-slazy'], settings) ,
'pbesinst-alternative_lazy' : lambda name, settings: PbesinstTest(name, ['-salternative-lazy'], settings) ,
'pbesinst-finite' : lambda name, settings: PbesinstTest(name, ['-sfinite', '-f*(*:Bool)'], settings) ,
'pbespgsolve' : lambda name, settings: PbespgsolveTest(name, settings) ,
'pbessolve' : lambda name, settings: Pbes2boolTest(name, settings) ,
'pbessolve-depth-first' : lambda name, settings: Pbes2boolDepthFirstTest(name, settings) ,
'pbessolve-counter-example-optimization-0' : lambda name, settings: Pbes2bool_counter_exampleTest(name, 0, settings) ,
'pbessolve-counter-example-optimization-1' : lambda name, settings: Pbes2bool_counter_exampleTest(name, 1, settings) ,
'pbessolve-counter-example-optimization-2' : lambda name, settings: Pbes2bool_counter_exampleTest(name, 2, settings) ,
'pbessolve-counter-example-optimization-3' : lambda name, settings: Pbes2bool_counter_exampleTest(name, 3, settings) ,
'pbessolve-counter-example-optimization-4' : lambda name, settings: Pbes2bool_counter_exampleTest(name, 4, settings) ,
'pbessolve-counter-example-optimization-5' : lambda name, settings: Pbes2bool_counter_exampleTest(name, 5, settings) ,
'pbessolve-counter-example-optimization-6' : lambda name, settings: Pbes2bool_counter_exampleTest(name, 6, settings) ,
'pbessolve-counter-example-optimization-7' : lambda name, settings: Pbes2bool_counter_exampleTest(name, 7, settings) ,
'pbesstategraph' : lambda name, settings: PbesstategraphTest(name, settings) ,
'pbes-unify-parameters' : lambda name, settings: Pbes_unify_parametersTest(name, settings) ,
'pbes-srf' : lambda name, settings: Pbes_srfTest(name, settings) ,
# 'pbessymbolicbisim' : lambda name, settings: PbessymbolicbisimTest(name, settings) , # excluded from the tests because of Z3 dependency
'bessolve' : lambda name, settings: BessolveTest(name, settings) ,
#'stochastic-ltscompare' : lambda name, settings: StochasticLtscompareTest(name, settings) ,
}
# These test do not work on Windows due to dependencies.
if os.name != 'nt':
available_tests.update({'pbessolvesymbolic' : lambda name, settings: PbessolvesymbolicTest(name, settings) })
# available_tests.update({ 'pbesbddsolve' : lambda name, settings: PbesbddsolveTest(name, settings) })
def print_names(tests):
for name in sorted(tests):
print(name)
# Return all tests that match with pattern. In case of an exact match, only this exact match is returned.
def matching_tests(tests, pattern):
matches = [name for name in sorted(tests) if re.search(pattern, name)]
if pattern in matches:
return [pattern]
return matches
def main(tests):
import argparse
cmdline_parser = argparse.ArgumentParser()
cmdline_parser.add_argument('-t', '--toolpath', dest='toolpath', help='The path where the mCRL2 tools are installed')
cmdline_parser.add_argument('-r', '--repetitions', dest='repetitions', metavar='N', default='10', help='Perform N repetitions of each test')
cmdline_parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='Display additional progress messages.')
cmdline_parser.add_argument('-k', '--keep-files', dest='keep_files', action='store_true', help='Keep the files produced by the test')
cmdline_parser.add_argument('-n', '--names', dest='names', action='store_true', help='Print the names of the available tests')
cmdline_parser.add_argument('-p', '--pattern', dest='pattern', metavar='P', default='.', action='store', help='Run the tests that match with pattern P')
cmdline_parser.add_argument('-o', '--output', dest='output', metavar='o', action='store', help='Run the tests in the given directory')
args = cmdline_parser.parse_args()
if args.names:
print_names(tests)
return
toolpath = args.toolpath
if not toolpath:
toolpath = MCRL2_INSTALL_DIR
settings = {'toolpath': toolpath, 'verbose': args.verbose, 'cleanup_files': not args.keep_files, 'allow-non-zero-return-values': True}
I = range(int(args.repetitions))
if args.output:
if not os.path.exists(args.output):
os.mkdir(args.output)
os.chdir(args.output)
test_failed = False
for name in matching_tests(tests, args.pattern):
try:
for i in I:
test = tests[name]('{}_{}'.format(name, i), settings)
test.execute_in_sandbox()
except Exception as e:
print('An exception occurred:', e.__class__, e)
traceback.print_exc()
test_failed = True
if (test_failed):
sys.exit(-1)
if __name__ == '__main__':
main(available_tests)
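    # Example invocation (paths are hypothetical):
    #   python3 random_testing.py --toolpath=../../install/bin --pattern=lpsconstelm --repetitions=5 --output=output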
``` |
{
"source": "jkeljo/hacs-greeneye-monitor",
"score": 2
} |
#### File: custom_components/greeneye_monitor/config_flow.py
```python
from __future__ import annotations
from typing import Any
import voluptuous as vol
from homeassistant import config_entries, data_entry_flow
from homeassistant.const import CONF_PORT
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN
class GreeneyeMonitorConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Config flow for greeneye_monitor."""
VERSION = 1
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> data_entry_flow.FlowResult:
"""Create a config entry from UI."""
if await self.async_set_unique_id(DOMAIN):
self._abort_if_unique_id_configured()
if user_input is not None:
data = {CONF_PORT: user_input[CONF_PORT]}
return self.async_create_entry(title="GreenEye Monitor", data=data)
return self.async_show_form(
step_id="user", data_schema=vol.Schema({vol.Required(CONF_PORT): cv.port})
)
``` |
{
"source": "jkeljo/siobrultech-protocols",
"score": 3
} |
#### File: siobrultech_protocols/gem/fields.py
```python
from abc import ABC, abstractmethod
from datetime import datetime
from enum import Enum, unique
from typing import Any, List
@unique
class ByteOrder(Enum):
# Big-endian (the name comes from the GEM packet format spec)
HiToLo = 1
# Little endian (the name comes from the GEM packet format spec)
LoToHi = 2
@unique
class Sign(Enum):
Signed = 1
Unsigned = 2
class Field(ABC):
def __init__(self, size: int):
self._size = size
@property
def size(self) -> int:
return self._size
@abstractmethod
def read(self, buffer: bytes, offset: int) -> Any:
"""Convert the buffer at the given offset to the proper value."""
class ByteField(Field):
def __init__(self):
super().__init__(size=1)
def read(self, buffer: bytes, offset: int) -> bytes:
return buffer[offset : offset + self.size]
class BytesField(Field):
def read(self, buffer: bytes, offset: int) -> bytes:
return buffer[offset : offset + self.size]
class NumericField(Field):
def __init__(self, size: int, order: ByteOrder, signed: Sign):
super().__init__(size=size)
self.order: ByteOrder = order
self.signed: Sign = signed
def read(self, buffer: bytes, offset: int) -> int:
return _parse(buffer[offset : offset + self.size], self.order, self.signed)
@property
def max(self) -> int:
"""The maximum value that can be encoded in this field."""
bits = 8 * self.size
if self.signed == Sign.Unsigned:
return (1 << bits) - 1
else:
return (1 << (bits - 1)) - 1
class FloatingPointField(Field):
def __init__(self, size: int, order: ByteOrder, signed: Sign, divisor: float):
self.raw_field: NumericField = NumericField(size, order, signed)
super().__init__(size=self.raw_field.size)
self.divisor: float = divisor
def read(self, buffer: bytes, offset: int) -> float:
return self.raw_field.read(buffer, offset) / self.divisor
class DateTimeField(Field):
def __init__(self):
super().__init__(size=6)
def read(self, buffer: bytes, offset: int) -> datetime:
year, month, day, hour, minute, second = buffer[offset : offset + self.size]
return datetime(2000 + year, month, day, hour, minute, second)
class ArrayField(Field):
def __init__(self, num_elems: int, elem_field: Field):
super().__init__(size=num_elems * elem_field.size)
self.elem_field: Field = elem_field
self.num_elems: int = num_elems
def read(self, buffer: bytes, offset: int) -> List[Any]:
return [
self.elem_field.read(buffer, offset + i * self.elem_field.size)
for i in range(self.num_elems)
]
class FloatingPointArrayField(ArrayField):
elem_field: FloatingPointField
def __init__(
self,
num_elems: int,
size: int,
order: ByteOrder,
signed: Sign,
divisor: float,
):
super().__init__(
num_elems=num_elems,
elem_field=FloatingPointField(
size=size, order=order, signed=signed, divisor=divisor
),
)
def read(self, buffer: bytes, offset: int) -> List[float]:
return super().read(buffer, offset)
class NumericArrayField(ArrayField):
elem_field: NumericField
def __init__(self, num_elems: int, size: int, order: ByteOrder, signed: Sign):
super().__init__(
num_elems=num_elems,
elem_field=NumericField(size=size, order=order, signed=signed),
)
def read(self, buffer: bytes, offset: int) -> List[int]:
return super().read(buffer, offset)
@property
def max(self) -> int:
return self.elem_field.max
def _parse(
raw_octets: bytes, order: ByteOrder = ByteOrder.HiToLo, signed: Sign = Sign.Unsigned
) -> int:
"""Reads the given octets as a big-endian value. The function name comes
from how such values are described in the packet format spec."""
octets = list(raw_octets)
if len(octets) == 0:
return 0
if order == ByteOrder.LoToHi:
octets.reverse()
# If this is a signed field (i.e., temperature), the highest-order
# bit indicates sign. Detect this (and clear the bit so we can
# compute the magnitude).
#
# This isn't documented in the protocol spec, but matches other
# implementations.
sign = 1
if signed == Sign.Signed and (octets[0] & 0x80):
octets[0] &= ~0x80
sign = -1
result = 0
for octet in octets:
result = (result << 8) + octet
return sign * result
```
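A minimal sketch of how these field classes decode raw bytes (the values below were chosen for illustration; the import path follows this repository's layout):
```python
from siobrultech_protocols.gem.fields import (
    ByteOrder, DateTimeField, FloatingPointField, NumericField, Sign,
)

# Two-byte big-endian unsigned value: 0x0102 == 258; little-endian reads 0x0201 == 513.
assert NumericField(2, ByteOrder.HiToLo, Sign.Unsigned).read(b"\x01\x02", 0) == 258
assert NumericField(2, ByteOrder.LoToHi, Sign.Unsigned).read(b"\x01\x02", 0) == 513

# Signed, divide-by-2 field (temperature style): the high bit encodes the sign.
assert FloatingPointField(2, ByteOrder.HiToLo, Sign.Signed, 2.0).read(b"\x80\x2a", 0) == -21.0

# Six-byte date: year offset from 2000, then month, day, hour, minute, second.
assert DateTimeField().read(bytes([21, 12, 31, 23, 59, 59]), 0).year == 2021
```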
#### File: siobrultech_protocols/gem/packets.py
```python
from __future__ import annotations
import codecs
import json
from collections import OrderedDict
from datetime import datetime
from enum import IntEnum, unique
from typing import Any, Dict, List, Optional
from .fields import (
ByteField,
ByteOrder,
BytesField,
DateTimeField,
Field,
FloatingPointArrayField,
FloatingPointField,
NumericArrayField,
NumericField,
Sign,
)
class MalformedPacketException(Exception):
pass
class Packet(object):
def __init__(
self,
packet_format: PacketFormat,
voltage: float,
absolute_watt_seconds: List[int],
device_id: int,
serial_number: int,
seconds: int,
pulse_counts: List[int],
temperatures: List[float],
polarized_watt_seconds: Optional[List[int]] = None,
currents: Optional[List[float]] = None,
time_stamp: Optional[datetime] = None,
**kwargs: Dict[str, Any]
):
self.packet_format: PacketFormat = packet_format
self.voltage: float = voltage
self.absolute_watt_seconds: List[int] = absolute_watt_seconds
self.polarized_watt_seconds: Optional[List[int]] = polarized_watt_seconds
self.currents: Optional[List[float]] = currents
self.device_id: int = device_id
self.serial_number: int = serial_number
self.seconds: int = seconds
self.pulse_counts: List[int] = pulse_counts
self.temperatures: List[float] = temperatures
if time_stamp:
self.time_stamp: datetime = time_stamp
else:
self.time_stamp: datetime = datetime.now()
def __str__(self) -> str:
return json.dumps(
{
"device_id": self.device_id,
"serial_number": self.serial_number,
"seconds": self.seconds,
"voltage": self.voltage,
"absolute_watt_seconds": self.absolute_watt_seconds,
"polarized_watt_seconds": self.polarized_watt_seconds,
"currents": self.currents,
"pulse_counts": self.pulse_counts,
"temperatures": self.temperatures,
"time_stamp": self.time_stamp.isoformat(),
}
)
@property
def num_channels(self) -> int:
"""The number of channels in the packet given the format. There may be fewer on the device."""
return self.packet_format.num_channels
@property
def type(self) -> str:
"""The packet format type's name."""
return self.packet_format.name
def delta_seconds(self, prev: int) -> int:
field = self.packet_format.fields["seconds"]
assert isinstance(field, NumericField)
return self._delta_value(field, self.seconds, prev)
def delta_pulse_count(self, index: int, prev: int) -> int:
field = self.packet_format.fields["pulse_counts"]
assert isinstance(field, NumericArrayField)
return self._delta_value(field.elem_field, self.pulse_counts[index], prev)
def delta_absolute_watt_seconds(self, index: int, prev: int) -> int:
field = self.packet_format.fields["absolute_watt_seconds"]
assert isinstance(field, NumericArrayField)
return self._delta_value(
field.elem_field, self.absolute_watt_seconds[index], prev
)
def delta_polarized_watt_seconds(self, index: int, prev: int) -> int:
field = self.packet_format.fields["polarized_watt_seconds"]
assert isinstance(field, NumericArrayField)
if self.polarized_watt_seconds is not None:
return self._delta_value(
field.elem_field, self.polarized_watt_seconds[index], prev
)
else:
return 0
def _delta_value(self, field: NumericField, cur: int, prev: int) -> int:
if prev > cur:
diff = field.max + 1 - prev
diff += cur
else:
diff = cur - prev
return diff
@unique
class PacketFormatType(IntEnum):
BIN48_NET_TIME = 4
BIN48_NET = 5
BIN48_ABS = 7
BIN32_NET = 8
BIN32_ABS = 9
class PacketFormat(object):
NUM_PULSE_COUNTERS: int = 4
NUM_TEMPERATURE_SENSORS: int = 8
def __init__(
self,
name: str,
type: PacketFormatType,
num_channels: int,
has_net_metering: bool = False,
has_time_stamp: bool = False,
):
self.name: str = name
self.type: PacketFormatType = type
self.num_channels: int = num_channels
self.fields: OrderedDict[str, Field] = OrderedDict()
self.fields["header"] = NumericField(3, ByteOrder.HiToLo, Sign.Unsigned)
self.fields["voltage"] = FloatingPointField(
2, ByteOrder.HiToLo, Sign.Unsigned, 10.0
)
self.fields["absolute_watt_seconds"] = NumericArrayField(
num_channels, 5, ByteOrder.LoToHi, Sign.Unsigned
)
if has_net_metering:
self.fields["polarized_watt_seconds"] = NumericArrayField(
num_channels, 5, ByteOrder.LoToHi, Sign.Unsigned
)
self.fields["serial_number"] = NumericField(2, ByteOrder.HiToLo, Sign.Unsigned)
self.fields["reserved"] = ByteField()
self.fields["device_id"] = NumericField(1, ByteOrder.HiToLo, Sign.Unsigned)
self.fields["currents"] = FloatingPointArrayField(
num_channels, 2, ByteOrder.LoToHi, Sign.Unsigned, 50.0
)
self.fields["seconds"] = NumericField(3, ByteOrder.LoToHi, Sign.Unsigned)
self.fields["pulse_counts"] = NumericArrayField(
PacketFormat.NUM_PULSE_COUNTERS, 3, ByteOrder.LoToHi, Sign.Unsigned
)
self.fields["temperatures"] = FloatingPointArrayField(
PacketFormat.NUM_TEMPERATURE_SENSORS,
2,
ByteOrder.LoToHi,
Sign.Signed,
2.0,
)
if num_channels == 32:
self.fields["spare_bytes"] = BytesField(2)
if has_time_stamp:
self.fields["time_stamp"] = DateTimeField()
self.fields["footer"] = NumericField(2, ByteOrder.HiToLo, Sign.Unsigned)
self.fields["checksum"] = ByteField()
@property
def size(self) -> int:
result = 0
for value in self.fields.values():
result += value.size
return result
def parse(self, packet: bytes) -> Packet:
if len(packet) < self.size:
raise MalformedPacketException(
"Packet too short. Expected {0} bytes, found {1} bytes.".format(
self.size, len(packet)
)
)
_checksum(packet, self.size)
offset = 0
args = {
"packet_format": self,
}
for key, value in self.fields.items():
args[key] = value.read(packet, offset)
offset += value.size
if args["footer"] != 0xFFFE:
raise MalformedPacketException(
"bad footer {0} in packet: {1}".format(
hex(args["footer"]), codecs.encode(packet, "hex") # type: ignore
)
)
return Packet(**args) # type: ignore
def _checksum(packet: bytes, size: int):
checksum = 0
for i in packet[: size - 1]:
checksum += i
checksum = checksum % 256
if checksum != packet[size - 1]:
raise MalformedPacketException(
"bad checksum for packet: {0}".format(codecs.encode(packet[:size], "hex"))
)
BIN48_NET_TIME = PacketFormat(
name="BIN48-NET-TIME",
type=PacketFormatType.BIN48_NET_TIME,
num_channels=48,
has_net_metering=True,
has_time_stamp=True,
)
BIN48_NET = PacketFormat(
name="BIN48-NET",
type=PacketFormatType.BIN48_NET,
num_channels=48,
has_net_metering=True,
has_time_stamp=False,
)
BIN48_ABS = PacketFormat(
name="BIN48-ABS",
type=PacketFormatType.BIN48_ABS,
num_channels=48,
has_net_metering=False,
has_time_stamp=False,
)
BIN32_NET = PacketFormat(
name="BIN32-NET",
type=PacketFormatType.BIN32_NET,
num_channels=32,
has_net_metering=True,
has_time_stamp=False,
)
BIN32_ABS = PacketFormat(
name="BIN32-ABS",
type=PacketFormatType.BIN32_ABS,
num_channels=32,
has_net_metering=False,
has_time_stamp=False,
)
```
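A minimal usage sketch (not part of the library; `read_bytes_from_meter` is a hypothetical stand-in for however the raw serial or TCP bytes are obtained) showing how one of the formats defined above parses a packet and how the delta helpers handle counter rollover between polls:
```python
from siobrultech_protocols.gem.packets import BIN32_ABS, MalformedPacketException
def read_bytes_from_meter() -> bytes:
    # Hypothetical helper: replace with a real serial/TCP read of one packet.
    raise NotImplementedError
def poll_once(previous_ws: int) -> int:
    raw = read_bytes_from_meter()
    try:
        packet = BIN32_ABS.parse(raw)
    except MalformedPacketException as exc:
        print(f"dropping bad packet: {exc}")
        return previous_ws
    # delta_absolute_watt_seconds wraps around the counter maximum internally
    delta = packet.delta_absolute_watt_seconds(0, previous_ws)
    print(f"channel 0 used {delta} watt-seconds since the last poll")
    return packet.absolute_watt_seconds[0]
```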
#### File: tests/gem/test_api.py
```python
from __future__ import annotations
import asyncio
import unittest
from datetime import datetime, timedelta
from typing import Optional
from unittest.async_case import IsolatedAsyncioTestCase
from unittest.mock import patch
import pytest
from siobrultech_protocols.gem.api import (
GET_SERIAL_NUMBER,
SET_DATE_AND_TIME,
SET_PACKET_FORMAT,
SET_PACKET_SEND_INTERVAL,
SET_SECONDARY_PACKET_FORMAT,
ApiCall,
R,
T,
call_api,
get_serial_number,
set_date_and_time,
set_packet_format,
set_packet_send_interval,
set_secondary_packet_format,
synchronize_time,
)
from siobrultech_protocols.gem.packets import PacketFormatType
from siobrultech_protocols.gem.protocol import (
API_RESPONSE_WAIT_TIME,
BidirectionalProtocol,
PacketProtocolMessage,
)
from tests.gem.mock_transport import MockRespondingTransport, MockTransport
class TestApi(unittest.TestCase):
def setUp(self):
self._queue: asyncio.Queue[PacketProtocolMessage] = asyncio.Queue()
self._transport = MockTransport()
self._protocol = BidirectionalProtocol(self._queue)
self._protocol.connection_made(self._transport)
# Put the protocol into a state where it's ready for commands
# and we can see exactly what is sent
self._protocol.begin_api_request()
self._transport.writes.clear()
def testApiCall(self):
call = ApiCall(lambda _: "REQUEST", lambda response: response)
self.assertCall(call, "REQUEST", None, None, "RESPONSE".encode(), "RESPONSE")
def testApiCallWithSerialNumber(self):
call = ApiCall(lambda _: "^^^REQUEST", lambda response: response)
self.assertCall(
call, "^^^NMB02345REQUEST", None, 1002345, "RESPONSE".encode(), "RESPONSE"
)
def testGetSerialNumber(self):
self.assertCall(
GET_SERIAL_NUMBER, "^^^RQSSRN", None, None, "1234567".encode(), 1234567
)
def testSetDateTime(self):
self.assertCall(
SET_DATE_AND_TIME,
"^^^SYSDTM12,08,23,13,30,28\r",
datetime.fromisoformat("2012-08-23 13:30:28"),
None,
"DTM\r\n".encode(),
True,
)
def testSetPacketFormat(self):
self.assertCall(
SET_PACKET_FORMAT,
"^^^SYSPKT02",
2,
None,
"PKT\r\n".encode(),
True,
)
def testSetPacketSendInterval(self):
self.assertCall(
SET_PACKET_SEND_INTERVAL,
"^^^SYSIVL042",
42,
None,
"IVL\r\n".encode(),
True,
)
def testSetSecondaryPacketFormat(self):
self.assertCall(
SET_SECONDARY_PACKET_FORMAT,
"^^^SYSPKF00",
0,
None,
"PKF\r\n".encode(),
True,
)
def assertCall(
self,
call: ApiCall[T, R],
request: str,
arg: T,
serial_number: Optional[int],
encoded_response: bytes,
parsed_response: R,
):
self.assertEqual(
call.send_request(self._protocol, arg, serial_number),
API_RESPONSE_WAIT_TIME,
)
self.assertEqual(
self._transport.writes,
[request.encode()],
f"{request.encode()} should be written to the transport",
)
self._protocol.data_received(encoded_response)
self.assertEqual(
call.receive_response(self._protocol),
parsed_response,
f"{parsed_response} should be the parsed value returned",
)
class TestContextManager(IsolatedAsyncioTestCase):
def setUp(self):
self._queue: asyncio.Queue[PacketProtocolMessage] = asyncio.Queue()
self._transport = MockTransport()
self._protocol = BidirectionalProtocol(self._queue)
self._protocol.connection_made(self._transport)
@pytest.mark.asyncio
@patch(
"siobrultech_protocols.gem.protocol.API_RESPONSE_WAIT_TIME",
timedelta(seconds=0),
)
@patch(
"siobrultech_protocols.gem.protocol.PACKET_DELAY_CLEAR_TIME",
timedelta(seconds=0),
)
async def testApiCall(self):
call = ApiCall(lambda _: "REQUEST", lambda response: response)
async with call_api(call, self._protocol) as f:
self.setApiResponse("RESPONSE".encode())
response = await f(None)
self.assertEqual(response, "RESPONSE")
@pytest.mark.asyncio
async def testTaskCanceled(self):
call = ApiCall(lambda _: "REQUEST", lambda response: response)
with self.assertRaises(asyncio.CancelledError):
with patch("asyncio.sleep") as mock_sleep:
mock_sleep.side_effect = asyncio.CancelledError
async with call_api(call, self._protocol):
raise AssertionError("this should not be reached")
    def setApiResponse(self, encoded_response: bytes) -> asyncio.Task[None]:
        async def notify_data_received() -> None:
            self._protocol.data_received(encoded_response)
        return asyncio.create_task(
            notify_data_received(), name=f"{__name__}:send_api_response"
)
class TestApiHelpers(IsolatedAsyncioTestCase):
def setUp(self):
self._protocol = BidirectionalProtocol(asyncio.Queue())
patcher_API_RESPONSE_WAIT_TIME = patch(
"siobrultech_protocols.gem.protocol.API_RESPONSE_WAIT_TIME",
timedelta(seconds=0),
)
patcher_API_RESPONSE_WAIT_TIME.start()
self.addCleanup(lambda: patcher_API_RESPONSE_WAIT_TIME.stop())
patcher_PACKET_DELAY_CLEAR_TIME = patch(
"siobrultech_protocols.gem.protocol.PACKET_DELAY_CLEAR_TIME",
timedelta(seconds=0),
)
patcher_PACKET_DELAY_CLEAR_TIME.start()
self.addCleanup(lambda: patcher_PACKET_DELAY_CLEAR_TIME.stop())
@pytest.mark.asyncio
async def test_get_serial_number(self):
transport = MockRespondingTransport(self._protocol, "1234567".encode())
self._protocol.connection_made(transport)
serial = await get_serial_number(self._protocol)
self.assertEqual(serial, 1234567)
@pytest.mark.asyncio
async def test_set_date_and_time(self):
transport = MockRespondingTransport(self._protocol, "DTM\r\n".encode())
self._protocol.connection_made(transport)
success = await set_date_and_time(self._protocol, datetime(2020, 3, 11))
self.assertTrue(success)
@pytest.mark.asyncio
async def test_set_packet_format(self):
transport = MockRespondingTransport(self._protocol, "PKT\r\n".encode())
self._protocol.connection_made(transport)
success = await set_packet_format(self._protocol, PacketFormatType.BIN32_ABS)
self.assertTrue(success)
@pytest.mark.asyncio
async def test_set_packet_send_interval(self):
with self.assertRaises(ValueError):
await set_packet_send_interval(self._protocol, -1)
with self.assertRaises(ValueError):
await set_packet_send_interval(self._protocol, 257)
transport = MockRespondingTransport(self._protocol, "IVL\r\n".encode())
self._protocol.connection_made(transport)
success = await set_packet_send_interval(self._protocol, 42)
self.assertTrue(success)
@pytest.mark.asyncio
async def test_set_secondary_packet_format(self):
transport = MockRespondingTransport(self._protocol, "PKF\r\n".encode())
self._protocol.connection_made(transport)
success = await set_secondary_packet_format(
self._protocol, PacketFormatType.BIN32_ABS
)
self.assertTrue(success)
@pytest.mark.asyncio
async def test_synchronize_time(self):
transport = MockRespondingTransport(self._protocol, "DTM\r\n".encode())
self._protocol.connection_made(transport)
success = await synchronize_time(self._protocol)
self.assertTrue(success)
``` |
{
"source": "jkeljo/sisyphus-control",
"score": 2
} |
#### File: sisyphus-control/sisyphus_control/data.py
```python
import asyncio
from collections import UserDict
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Union
CollectionListener = Union[Callable[[], None], Callable[[], Awaitable[None]]]
class Model(UserDict):
"""Holds the data about one entity in a collection."""
def __init__(self, data: Dict[str, Any]):
super().__init__(data)
async def update_from_changes(self, changes: 'Model') -> bool:
data_changed = False
for key, value in changes.items():
            if key not in self or self[key] != value:
self[key] = value
data_changed = True
return data_changed
if TYPE_CHECKING:
CollectionBase = UserDict[Union[str, int], Model]
else:
CollectionBase = UserDict
class Collection(CollectionBase):
"""Holds all the data returned by the table, as Model objects keyed by ID."""
def __init__(self):
super().__init__()
self._listeners: List[CollectionListener] = []
async def add(self, item: Model) -> None:
id = item.data["id"]
if id in self:
should_notify = await self[id].update_from_changes(item)
else:
self[id] = item
should_notify = True
if should_notify:
await self._notify_listeners()
def add_listener(self, listener: CollectionListener) -> None:
self._listeners.append(listener)
def remove_listener(self, listener: CollectionListener) -> None:
self._listeners.remove(listener)
async def _notify_listeners(self) -> None:
listeners = list(self._listeners)
for listener in listeners:
await asyncio.coroutine(listener)() # type: ignore
if __name__ == "__main__":
import aiounittest
import unittest
from unittest.mock import MagicMock
class CollectionTests(aiounittest.AsyncTestCase):
async def test_add(self) -> None:
coll = Collection()
item = Model({"id": 12345, "key": "value"})
await coll.add(item)
returned = coll.get(12345)
self.assertEqual(item, returned)
async def test_update_does_not_remove_value(self) -> None:
coll = Collection()
item = Model({"id": 12345, "key": "value"})
await coll.add(item)
delta = Model({"id": 12345})
await coll.add(delta)
returned = coll.get(12345)
self.assertEqual(returned, item)
async def test_update_adds_new_keys(self) -> None:
coll = Collection()
item = Model({"id": 12345, "key": "value"})
await coll.add(item)
delta = Model({"id": 12345, "key2": "value2"})
await coll.add(delta)
returned = coll.get(12345)
expected = Model({"id": 12345, "key": "value", "key2": "value2"})
self.assertEqual(returned, expected)
async def test_update_changes_values(self) -> None:
coll = Collection()
item = Model({"id": 12345, "key": "value"})
await coll.add(item)
delta = Model({"id": 12345, "key": "new_value"})
await coll.add(delta)
returned = coll.get(12345)
self.assertEqual(returned, delta)
async def test_update_notifies_listeners(self) -> None:
coll = Collection()
item = Model({"id": 12345, "key": "value"})
await coll.add(item)
listener = MagicMock()
coll.add_listener(listener)
delta = Model({"id": 12345, "key": "new_value"})
await coll.add(delta)
assert listener.called
async def test_no_op_update_does_not_notify_listeners(self) -> None:
coll = Collection()
item = Model({"id": 12345, "key": "value"})
await coll.add(item)
listener = MagicMock()
coll.add_listener(listener)
delta = Model({"id": 12345, "key": "value"})
await coll.add(delta)
assert not listener.called
unittest.main()
```
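A small usage sketch (not from the package itself) showing the delta-merge and listener behavior above with an async listener; it assumes the module is importable as `sisyphus_control.data` and runs on a Python version where `asyncio.coroutine` is still available, since `_notify_listeners` relies on it:
```python
import asyncio
from sisyphus_control.data import Collection, Model
async def demo() -> None:
    coll = Collection()
    async def on_change() -> None:
        print("collection changed")
    coll.add_listener(on_change)
    await coll.add(Model({"id": 1, "state": "homing"}))   # new id -> notifies
    await coll.add(Model({"id": 1, "state": "playing"}))  # changed value -> notifies
    await coll.add(Model({"id": 1, "state": "playing"}))  # no-op delta -> silent
asyncio.run(demo())
```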
#### File: sisyphus-control/sisyphus_control/playlist.py
```python
from datetime import datetime
from typing import Any, Dict, ForwardRef, List, Optional, Type
from . import table
from .data import Model
from .log import log_data_change
from .track import Track
from .transport import TableTransport
from .sisbot_json import parse_bool
class Playlist:
"""Represents a playlist in the context of a table. If working with
multiple tables that have the same playlist loaded, multiple Playlist objects
will be created for that playlist -- one for each table that has it loaded."""
parent: 'table.Table'
def __init__(
self,
table: 'table.Table',
transport: TableTransport,
data: Model):
self.parent = table
self._transport: TableTransport = transport
self._data: Model = data
def __str__(self) -> str:
return "{name} v{version} ({num_tracks} tracks)".format(
name=self.name,
version=self.version,
num_tracks=len(self.tracks))
    @property
def id(self) -> str:
return self._data["id"]
    @property
def name(self) -> str:
return self._data["name"]
    @property
def tracks(self) -> List[Track]:
return [
self._get_track_by_index(index)
for index in self._data["sorted_tracks"]]
def get_tracks_named(self, name: str) -> List[Track]:
return [track for track in self.tracks if track.name == name]
def _get_track_by_index(self, index: int) -> Track:
return Track(self, self._transport, self._data["tracks"][index])
    @property
def is_loop(self) -> bool:
return parse_bool(self._data["is_loop"])
    @property
def is_shuffle(self) -> bool:
return parse_bool(self._data["is_shuffle"])
async def set_shuffle(self, value: bool) -> None:
if self.parent.active_playlist != self:
raise Exception(
"set_shuffle may only be called on the active playlist")
if value == self.is_shuffle:
return
await self._transport.post("set_shuffle",
{"value": str(value).lower()})
    @property
def description(self) -> str:
return self._data["description"]
    @property
def created_time(self) -> datetime:
return _parse_date(self._data["created_at"])
    @property
def updated_time(self) -> datetime:
return _parse_date(self._data["updated_at"])
    @property
def version(self) -> int:
return int(self._data["version"])
    @property
def active_track(self) -> Optional[Track]:
index = self._data["active_track_index"]
if index < 0:
return None
return self._get_track_by_index(index)
async def play(self, track: Optional[Track] = None) -> None:
if track:
if track.parent != self:
raise ValueError("Track object is not part of this playlist")
self._data["active_track_index"] = track.index_in_playlist
self._data["active_track_id"] = track.id
await self._transport.post("set_playlist", self._data.data)
await self.parent.play()
def _parse_date(date_str: str) -> datetime:
return datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S")
``` |
{
"source": "jkellers/traffic-cam",
"score": 3
} |
#### File: traffic-cam/bin/05_count_to_api.py
```python
import requests
from traffic_cam import classifier, io, predictor, paths
from time import sleep
def loop(classifier_model, predictor_model):
# download image
    path = io.download_image()
    # download_image returns None when ffmpeg fails, so bail out early
    if path is None:
        return False
    # classify image location
location = classifier.move_image_by_class(source_path=path, model=classifier_model)
# if image location not useful, wait and download new image
if "street" not in location:
print("Not a street, skipping.")
return False
# count persons on image
persons = predictor_model.predict_image(image_path=paths.TRAIN_DIR / location / path.name, plot=False)
# collect additional information for API
rain = True # 90 % accuracy for Münster, good enough
    # build request payload
    payload = {
        "count": persons,
        "timestamp": io.get_timestamp_isoformat(),
        "device_id": location,
        "data": {
            "rain": rain,
        }
    }
    print(payload)
    # send count to API
    response = requests.post(
        "https://counting-backend.codeformuenster.org/counts/",
        json=payload,
    )
if __name__ == "__main__":
cl = classifier.get_classifier_model()
pr = predictor.Predictor()
while True:
try:
loop(classifier_model=cl, predictor_model=pr)
sleep(10)
        except KeyboardInterrupt:
            print("Interrupted by user.")
            break
```
#### File: traffic-cam/traffic_cam/classifier.py
```python
import json
import logging
import shutil
from pathlib import Path
from typing import Dict
import numpy as np
import wget
from tensorflow.keras.applications import MobileNet
from tensorflow.keras.applications.mobilenet import preprocess_input
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing import image
from traffic_cam import paths
def train_classifier_model(n_classes: int, learning_rate: float) -> Model:
# build model
base_model = MobileNet(
weights="imagenet", include_top=False
    )  # loads the MobileNet base and discards the final 1000-neuron classification layer.
x = base_model.output
x = GlobalAveragePooling2D()(x)
    # add dense layers so that the model can learn more complex functions
    # and produce better classification results.
x = Dense(1024, activation="relu")(x)
x = Dense(1024, activation="relu")(x) # dense layer 2
x = Dense(512, activation="relu")(x) # dense layer 3
preds = Dense(n_classes, activation="softmax")(
x
) # final layer with softmax activation
model = Model(inputs=base_model.input, outputs=preds)
# set only final layers trainable
for layer in model.layers[:20]:
layer.trainable = False
for layer in model.layers[20:]:
layer.trainable = True
# compile
opt = Adam(learning_rate=learning_rate)
model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["accuracy"])
return model
def get_classifier_model() -> Model:
"""Download image classifier (if not exists), and load to memory."""
if not paths.CLASSIFIER_HDF5.exists():
wget.download(
url="https://github.com/codeformuenster/traffic-cam-data/blob/master/model/model.hdf5?raw=true",
out=str(paths.CLASSIFIER_HDF5),
)
return load_model(str(paths.CLASSIFIER_HDF5))
def classify_image(filepath: Path, model: Model) -> np.ndarray:
# preprocess image
img = image.load_img(str(filepath), target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
# predict with model
return model.predict(x)
def move_image_by_class(source_path: Path, model: Model):
predictions = classify_image(filepath=source_path, model=model)
predicted_class: str = get_predicted_class(predictions=predictions)
target_dir = paths.TRAIN_DIR / predicted_class
target_dir.mkdir(parents=True, exist_ok=True)
shutil.move(source_path, target_dir / source_path.name)
logging.info(f"Moved image: {source_path.name}.")
return predicted_class
def load_class_indices() -> Dict[str, int]:
with open(paths.CLASSES_JSON, "r") as f:
classes: Dict[str, int] = json.loads(f.read())
return classes
def get_predicted_class(predictions: np.ndarray) -> str:
classes = load_class_indices()
return list(classes.keys())[np.argmax(predictions)]
```
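A brief usage sketch of the classifier helpers above (it assumes the model weights and `class_indices.json` already exist at the paths defined in `traffic_cam.paths`; `data/some_image.jpg` is an illustrative image path):
```python
from pathlib import Path
from traffic_cam import classifier
model = classifier.get_classifier_model()  # downloads model.hdf5 if it is missing
preds = classifier.classify_image(filepath=Path("data/some_image.jpg"), model=model)
print(classifier.get_predicted_class(preds))  # prints the name of the most probable class
```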
#### File: traffic-cam/traffic_cam/io.py
```python
import argparse
import datetime
import logging
import subprocess
from pathlib import Path
from typing import Optional
from traffic_cam import paths
def get_timestamp_isoformat() -> str:
return datetime.datetime.utcnow().isoformat()
def download_frame(path: str):
download_command = f"""ffmpeg \
-i https://5f27cc8163c2e.streamlock.net/833/default.stream/playlist.m3u8?wowzatokenhash=0mfLM7iDsbsXsvj91j1LqHWRrf2ZMRArPtr8efxJnjU= \
-vframes 1 \
{path}"""
subprocess.run(download_command.split(), check=True)
def download_image() -> Optional[Path]:
"""Download current image and return it's path."""
timestamp = get_timestamp_isoformat()
filename = f"image_{timestamp}.jpg"
path = paths.DATA_DIR / filename
try:
download_frame(path=path)
except subprocess.CalledProcessError as e:
logging.error(f"Failed to download frame: {e}")
return None
logging.info(f"Downloaded image: {filename}.")
return path
def str2bool(value: str) -> bool:
if isinstance(value, bool):
return value
if value.lower() in ("yes", "true", "t", "y", "1"):
return True
elif value.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
```
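An illustrative argparse wiring for `str2bool` (the parser and the `--plot` flag are assumptions for the example, not part of the project):
```python
import argparse
from traffic_cam import io
parser = argparse.ArgumentParser()
parser.add_argument("--plot", type=io.str2bool, nargs="?", const=True, default=False)
print(parser.parse_args(["--plot", "no"]).plot)  # False
print(parser.parse_args(["--plot"]).plot)        # True (const kicks in)
print(parser.parse_args([]).plot)                # False (default)
```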
#### File: traffic-cam/traffic_cam/paths.py
```python
from pathlib import Path
# data
DATA_DIR = Path("data/")
TRAIN_DIR = DATA_DIR / "train"
# image classifier
CLASSIFIER_DIR = Path("data/classifier/")
CLASSIFIER_HDF5 = CLASSIFIER_DIR / "model.hdf5"
CLASSES_JSON = CLASSIFIER_DIR / "class_indices.json"
# yolo
YOLO_CFG = Path("darknet/cfg/yolov3.cfg")
YOLO_WEIGHTS = Path("cfg/yolov3.weights")
YOLO_CLASSES = Path("data/coco.names")
# output
OUTPUT_DIR = Path("output/")
# api
CLASS_LOCATION = CLASSIFIER_DIR / "class_location.json"
API_URL = "https://counting-backend.codeformuenster.org"
def create_paths_if_not_exist():
"""Create defined paths if they don't exist already, including parents."""
DATA_DIR.mkdir(parents=True, exist_ok=True)
TRAIN_DIR.mkdir(parents=True, exist_ok=True)
CLASSIFIER_DIR.mkdir(parents=True, exist_ok=True)
``` |
{
"source": "jkelleyrtp/electron-optimization",
"score": 3
} |
#### File: electron-optimization/potential_optimizer/potential_optimizer.py
```python
import sys
from math import sin, cos, tan, radians, sqrt, ceil
import pyopencl as cl
import numpy as np
import pyopencl.array as cl_array
from scipy.special import ellipk, ellipe
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sb
import cPickle as pickle
# Constants
mu_0 = 1.25663706e-6
ellipe_table = ellipe(np.arange(0,1, 1.0/10000000.0))
ellipk_table = ellipk(np.arange(0,1,1.0/10000000.0))
e_charge = 1.6e-19 # Coulombs
e_mass = 9.1e-31 # Kilograms
'''
Takes array of coils and displays to screen. First and second coils are bounding
box coils.
Positions is list of positions
'''
class all:
def __init__(self):
print '-- New all object created --'
# call GPU building
# initialize GPU
# load single particle simulation code
# pass positions, velocities, coils
# electron gun function returns positions and velocities
class _GPU:
def __init__(self, filename, device_id = 1):
# Setup OpenCL platform
platform = cl.get_platforms()
computes = [platform[0].get_devices()[device_id]]
print "New context created on", computes
self.ctx = cl.Context(devices=computes)
self.queue = cl.CommandQueue(self.ctx)
self.mf = cl.mem_flags
# Open and build cl code
f = open(filename, 'r')
fstr = "".join(f.readlines())
self.program = cl.Program(self.ctx, fstr).build()
def execute(self, sim, quiet=False):
# 1 float is 4 bytes
# Prepare input, output, and lookup val buffers
self.p_buf = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf=sim.positions ) # Positions
self.v_buf = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf=sim.velocities ) # Velocities
self.coil_buf = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf=sim.coils ) # Coils
self.c_spheres_buf = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = sim.c_spheres)# Charge spheres
self.ee = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf=sim.ee_table ) # Elliptical Integral 1
self.ek = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf=sim.ek_table ) # Elliptical Integral 2
self.d_buf = cl.Buffer(self.ctx, self.mf.WRITE_ONLY, sim.bytesize * sim.num_particles * sim.num_steps) # Output r^2 buffer
self.queue.finish()
# Run Kernel
kernelargs = (self.p_buf, self.v_buf, self.coil_buf, self.c_spheres_buf, self.ee, self.ek, self.d_buf, sim.sim_properties, sim.dt)
#program.compute_trajectory(queue, (1,), None, np.array([0.0,0.01,0.01]), np.array([1.0,1.0,500000]), np.array([0,.0375,.1,0.0,.05,0.0375,-.1,0.0]), np.array([1]), np.array([1]), np.array())
if quiet!=True: print "Values successfully passed"
self.program.compute_trajectory(self.queue, (int(sim.num_particles),), None, *(kernelargs))
if quiet!=True: print "Kernels started"
self.queue.finish()
# Dump, clean, return -- must reshape data when using float4s
self.ret_val = np.empty_like(np.ndarray((sim.num_particles, sim.num_steps, sim.bytesize/4)).astype(np.float32))
read = cl.enqueue_copy(self.queue, self.ret_val, self.d_buf)
self.queue.finish()
read.wait()
# print (read.profile.end-read.profile.start)
self.d_buf.release()
print "\a"
if quiet!=True: print "Simulation finished"
return self.ret_val
class _SIMOBJECT:
def __init__(self, positions, velocities, coils, num_particles, steps, bytesize=4, iter_nth = 1, dt = .0000000000002, num_coils = 2, avg_velo = 0, c_charge = 0.0):
self.positions = positions.astype(np.float64)
self.velocities = velocities.astype(np.float64)
self.coils = np.array(coils).astype(np.float32)
self.num_particles = np.int32(num_particles)
self.num_steps = np.int32(steps)
self.bytesize = bytesize
self.ee_table = ellipe_table.astype(np.float32)
self.ek_table = ellipk_table.astype(np.float32)
self.dt = np.float64(dt)
self.iter_nth = np.int32(iter_nth)
self.num_coils = np.int32(num_coils)
self.sim_properties = np.asarray([self.num_particles, self.num_steps, self.iter_nth, self.num_coils]).astype(np.int32)
self.avg_velo = avg_velo
self.c_spheres = np.asarray([c_charge]*num_particles, dtype = np.float64)
def get_conf_times(self, store=True):
conf_times = []
#print radius, z_pos, dt, iter_nth
radius = self.coils[0][1]
z_pos = self.coils[1][0]
dt = self.dt
iter_nth = self.iter_nth
r_vals = self.r_vals
for p in range(len(r_vals)) :
x_conf = len(np.where( abs(r_vals[p][:,0]) < radius)[0]) * dt * iter_nth * 1e9
y_conf = len(np.where( abs(r_vals[p][:,1]) < radius)[0]) * dt * iter_nth * 1e9
z_conf = len(np.where( abs((z_pos/2.0) - r_vals[p][:,2]) < (z_pos/2.0))[0]) * dt * iter_nth * 1e9
conf_times.append(np.amin([x_conf,y_conf,z_conf]))
if(store):
self.conf_times = conf_times
else:
return conf_times
def graph_conf_times(self, markersize = .5):
def graph_clicked(event):
print "clicked"
self.graph_trajectory(int(event.x))
fig = plt.figure()
fig.canvas.mpl_connect('button_press_event', graph_clicked)
plt.subplot(121)
plt.scatter(range(len(self.conf_times)), self.conf_times, s = markersize)
plt.show()
plt.title("Mean time: " + str(np.mean(self.conf_times)) + " | First 20% Mean: " + str(np.mean(self.conf_times[0:int(0.2 * len(self.conf_times))])))
def graph_trajectory(self, run_id):
positions = self.r_vals[run_id]
coil_1 = self.coils[run_id*self.num_coils]
coil_2 = self.coils[run_id*self.num_coils+1]
r = coil_1[1] # the radius of the circle
steps = len(positions)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(positions[:,0], positions[:,1], zs= positions[:,2])
ax.set_xlim([r, r * -1])
ax.set_ylim([r, r * -1])
ax.set_zlim([0, coil_2[0]])
theta = np.linspace(0, 2*np.pi, 100)
# compute x1 and x2
loop_x = r*np.cos(theta)
loop_y = r*np.sin(theta)
loop_z=0
ax.plot(loop_x,loop_y, loop_z)
ax.plot(loop_x,loop_y, coil_2[0])
ax.scatter(positions[0][0],positions[0][1],positions[0][2], color="green")
ax.scatter(positions[steps-2][0],positions[steps-2][1],positions[steps-2][2], color="red")
class _COIL:
def __init__(self, radius = 0.05, current = 10000, z_pos = 0.0):
self.radius = radius
self.current = current
self.z_pos = z_pos
self.position = [0.0, 0.0, z_pos, 0.0]
self.B_0 = self.current * mu_0 / (2.0 * self.radius)
self.arr = np.array([z_pos, radius, self.B_0, 0.0]).astype(np.float32)
def single_sim(self, device_id = 0):
# Generate a single electron pos data
# best of 1105.824 at -5000, 5000 [ 0,0.0004, -.03], [0,-5e3, 7e5]
sp_charge = -1e-8
#sp_charge = -15e-9
ct = 23000
#ct = 20000
major_R = .014
#major_R = .006
zvelo = 1e6
coil_1 = self._COIL( radius = .1, current = ct, z_pos = 0.0 )
coil_2 = self._COIL( radius = .1, current = -ct, z_pos = 0.1)
#coil_3 = self._COIL( radius = .03, current = 3000, z_pos = 0.06 )
#coil_4 = self._COIL( radius = .03, current = -3000, z_pos = -.01 )
coils = [coil_1.arr, coil_2.arr]#, coil_3.arr, coil_4.arr]
# Constants
e_charge = 1.6e-19 # Coulombs
e_mass = 9.1e-31 # Kilograms
e_gun_energy = 0 # measured in volts
avg_velo = sqrt( (2.0 * e_gun_energy * e_charge) / e_mass) # m/s
positions = np.array([[0.0000 , major_R, -0.03, 0.0,]])
#velocities = np.array([[0.0, 0, avg_velo ,0.0,]]) #9.70017400e+05
#velocities = np.array([[1e2, 0, avg_velo,0.0,]]) #9.70017400e+05
velocities = np.array([[1e3, 0, zvelo]]) #9.70017400e+05
print velocities
#coils[0][2] = 0.06578967
#coils[1][2] = -0.06578967
num_particles = 1
steps = 350000; #350000;
bytesize = 16
iter_nth = 36;
dt = .0000000000002
self.SINGLE_SIM = self._SIMOBJECT(positions, velocities, coils, num_particles, steps,num_coils = len(coils), bytesize = bytesize, iter_nth=iter_nth, dt = dt, c_charge = sp_charge)# -3e-11)#, c_charge = -1e-7)
self.SINGLE_SIM.calculator = self._GPU(path_to_integrator, device_id)
self.SINGLE_SIM.r_vals = self.SINGLE_SIM.calculator.execute( self.SINGLE_SIM)
a = self.SINGLE_SIM.r_vals[0]
self.SINGLE_SIM.graph_trajectory(0);
self.SINGLE_SIM.get_conf_times()
#self.SINGLE_SIM.conf_times = self.get_conf_times(self.SINGLE_SIM.r_vals, coil_1.radius, coil_2.z_pos, dt, iter_nth)
#self, r_vals, radius, z_pos, dt, iter_nth
print "Total confinement:", self.SINGLE_SIM.conf_times[0]
plt.title(("Total confinement:", self.SINGLE_SIM.conf_times[0], " ns"))
plt.show()
def generic_simulation(self, num_particles = 10000, steps = 9000000, egun_energy = 1000, coil_current = 5000, e_gun_z = -.03, c_charge = 0.0, injection_radius= .0006,memory = 3000000000):
coil_1 = self._COIL( radius = .05, current = coil_current, z_pos = 0.0 )
coil_2 = self._COIL( radius = .05, current = coil_current*-1.0, z_pos = 0.05 )
coils = [coil_1.arr, coil_2.arr]
# Control parameters
memory = memory
bytesize = 16
num_particles = num_particles
total_steps = steps # ten million
dt = .0000000000002
mem_p_particle = memory/num_particles # can serve so many bytes to display
steps = mem_p_particle/bytesize
iter_nth = total_steps/steps
print "Steps: ",steps," iter_nth: ", iter_nth
e_gun_energy = egun_energy # measured in volts
avg_velo = sqrt( (2.0 * e_gun_energy * e_charge) / e_mass) # m/s
positions = np.tile( [0.0 ,injection_radius, e_gun_z, 0.0], (num_particles, 1))
velocities = np.tile ([1e3, 0.0, avg_velo, 0.0],(num_particles, 1) )
coils = np.tile(coils,(num_particles, 1) )
c_spheres = np.asarray([c_charge]*num_particles, dtype=np.float64)
return self._SIMOBJECT(positions, velocities, coils, num_particles, steps, bytesize = bytesize, iter_nth=iter_nth, dt = dt, avg_velo = avg_velo, c_charge = c_charge)
def nd_paramspace(self, data, device_id = 2):
'''
        Data is an array shaped into a set of parameters for the simulation.
        Data is not a meshgrid, but rather a list of arrays for each parameter.
a[0] = injection_radius
a[1] = Z_velocitiy
a[2] = coil_current
a[3] = coil_separation
a[4] = space_charge
'''
paramspace = np.array(np.meshgrid(*data)).T.reshape(-1, len(data))
num_particles = len(paramspace)
positions = np.zeros((num_particles, 4))
positions[:,1] = paramspace[:,0]
positions[:,2] = -.03
velocities = np.zeros((num_particles, 4))
velocities[:,2] = paramspace[:,1]
velocities[:,0] = 1e3
# z, r, B_0
coil_radius = 0.05
coil_current = paramspace[:,2]
coil_separation = paramspace[:,3]
coils = np.zeros((num_particles*2, 4)).astype(np.float32)
coils[:,0][1::2] = coil_separation
coils[:,1] = coil_radius # Coil radius
coils[:,2] = coil_current.repeat(2) * mu_0 / (2.0 * coil_radius)
coils[:,2][1::2] *= -1.0
# we want 1000 location points per run
# 3gb / 1000 = 750000 max_particles per run (memory limited)
#particles_per_run
ppr = 65536
num_runs = int(ceil(num_particles / float(ppr) ))
print "Number of runs required: " + str(num_runs)
self.simulations = []
for i in range(int(num_runs)):
self.simulations.append( self._SIMOBJECT(positions[ppr*i:ppr*(i+1)], velocities[ppr*i:ppr*(i+1)], coils[ppr*i:ppr*(i+1)], num_particles =ppr, steps = 400, num_coils = 2, dt = .0000000000002, bytesize = 16, iter_nth = 10000, c_charge = -1e-12))
print "All simulations created"
sim_id = 0
for sim in self.simulations:
print "Running simulation - " + str(sim_id)
if sim_id > -1: # change this id to skip over runs if gpu crashes
sim.r_vals = self._GPU(path_to_integrator, device_id).execute(sim) # Returns r_vals
np.save("simulations/Simulation - part "+str(sim_id), sim.get_conf_times(store=False))
sim_id+=1
print 'Simulations complete'
#self.GUN_L.calculator = self._GPU(path_to_integrator, device_id)
#self.GUN_L.r_vals = self.GUN_L.calculator.execute(self.GUN_L)
def paramspace_per_sc(self, device_id):
slices = 25
injection_radius = np.linspace(0.0005, 0.005, slices)
z_velocitiy = np.linspace(.5e6, 5e7, slices)
coil_current = np.linspace(5000.0, 15000.0, slices)
coil_separation = np.linspace(0.03, 0.1, slices)
r_vals = self.nd_paramspace([injection_radius,z_velocitiy,coil_current,coil_separation])
def paramspace_detailed(self, device_id):
injection_radius = np.linspace(0.0005, 0.01, 100)
z_velocitiy = np.linspace(.5e6, 5e7, 100)
coil_current = np.linspace(5000.0, 15000.0, 100)
coil_separation = np.linspace(0.05, 0.1, 1)
r_vals = self.nd_paramspace([injection_radius,z_velocitiy,coil_current,coil_separation], device_id)
def paramspace_single(self, device_id):
injection_radius = np.linspace(0.0001, 0.018, 1000)
z_velocitiy = np.linspace(1e6, 5e7, 1)
coil_current = np.linspace(5000.0, 15000.0, 1)
coil_separation = np.linspace(0.05, 0.1, 1)
r_vals = self.nd_paramspace([injection_radius,z_velocitiy,coil_current,coil_separation],device_id)
def gun_v_l(self, device_id=2):
self.GUN_L = self.generic_simulation(egun_energy=1000, coil_current=40000)
position_arr = np.linspace(0, -0.05, self.GUN_L.num_particles )
self.GUN_L.positions[:,2] = position_arr
self.GUN_L.calculator = self._GPU(path_to_integrator, device_id)
self.GUN_L.r_vals = self.GUN_L.calculator.execute(self.GUN_L)
self.GUN_L.conf_times = self.GUN_L.get_conf_times()
def r_v_E(self, device_id = 2):
self.GUN_L = self.generic_simulation(num_particles = 32768, egun_energy=500, coil_current=1000, e_gun_z = -.1)
r_lin = np.tile(np.linspace(-0.0001, -0.001, 32 ), (1, 32))[0]
l_lin = np.linspace(-.02, -.06, 32).repeat(32)
v_lin = (np.linspace(.01, 1, 32) * self.GUN_L.avg_velo).repeat(1024)
self.GUN_L.positions[:,0] = r_lin.repeat(32)
self.GUN_L.positions[:,2] = l_lin.repeat(32)
self.GUN_L.velocities[:,2] = v_lin
self.GUN_L.calculator = self._GPU(path_to_integrator, device_id)
self.GUN_L.r_vals = self.GUN_L.calculator.execute(self.GUN_L)
self.GUN_L.conf_times = self.GUN_L.get_conf_times()
self.GUN_L.graph_conf_times()
def egunE_v_CC(self,device_id = 2):
cc_slices = 100
ee_slices = 150
cc = 10000
ee = 3000
row = cc_slices
col = ee_slices
self.GUN_L = self.generic_simulation(num_particles = (row*col), egun_energy=ee, coil_current=cc, e_gun_z = -.03, c_charge = -1e-9)
v_lin = (np.linspace(.01, 1, col) * self.GUN_L.avg_velo).repeat(row)
CC_lin = np.linspace(1, cc, col).repeat(2)
flip = np.ones(2 * col)
flip[1::2] = flip[1::2]*-1
CC_lin = CC_lin * flip * mu_0 / (2.0 * .05)
self.GUN_L.positions[:,0] = np.asarray([0.0008]*row*col)
self.GUN_L.velocities[:,2] = v_lin
self.GUN_L.coils[:,2] = np.tile(CC_lin, (1,row))
self.GUN_L.coils[:,0][1::2] = 0.05
self.GUN_L.calculator = self._GPU(path_to_integrator, device_id)
self.GUN_L.r_vals = self.GUN_L.calculator.execute(self.GUN_L)
self.GUN_L.get_conf_times()
self.GUN_L.graph_conf_times()
plt.subplot(122)
hm = sb.heatmap(np.asarray(self.GUN_L.conf_times).reshape(row,col), xticklabels=5, yticklabels=5, robust=False)
hm.invert_yaxis()
plt.title("EGUN Energy max: "+str(ee) + " | Coil Current max: " + str(cc))
plt.show()
def crit_val_show(self,device_id = 2):
num_slices = 1500
crit = 6.4e6
velo = 592999.453328881
v_lin = np.linspace(velo, 10000*velo, num_slices)
CC_lin = v_lin / crit
cc = 10000
#row = cc_slices
#col = ee_slices
self.GUN_L = self.generic_simulation(num_particles = (num_slices), e_gun_z = -.03)
#r_lin = np.tile(np.linspace(0, -0.005, 32 ), (1, 32))[0]
#l_lin = np.linspace(-.01, -.07, 32).repeat(32)
#v_lin = (np.linspace(.01, 1, col) * self.GUN_L.).repeat(row)
#v_lin = (np.linspace(.01, 1, col) * z.GUN_L.).repeat(row)
flip = np.ones(2 * num_slices)
flip[1::2] = flip[1::2]*-1
CC_lin = CC_lin.repeat(2) * flip
#v_lin = CC_lin[0::2] * 10000000.0
# self.GUN_L.positions[:,0] = r_lin.repeat(32)
# self.GUN_L.positions[:,2] = l_lin.repeat(32)
self.GUN_L.velocities[:,2] = v_lin
self.GUN_L.coils[:,2] = CC_lin
self.GUN_L.calculator = self._GPU(path_to_integrator, device_id)
self.GUN_L.r_vals = self.GUN_L.calculator.execute(self.GUN_L)
self.GUN_L.get_conf_times()
self.GUN_L.graph_conf_times()
#plt.subplot(122)
#hm = sb.heatmap(np.asarray(self.GUN_L.conf_times).reshape(num_slices,1), xticklabels=5, yticklabels=5, robust=False)
#hm.invert_yaxis()
#plt.title("EGUN Energy max: "+str(ee) + " | Coil Current max: " + str(cc))
#plt.show()
def active_optimizer(self, device_id = 0, optimizer = 0):
# Spins up an instance for every parameter changed and looks at which parameter positively impacted the simulation.
# Sets new simulation to that paramter and retries over and over until it getss stuck
num_particles = 4
leap_factor = 1.02
parameters = {"sp_charge":-11e-12 , "coil_current": 6990.0 , 'injection_radius': 0.00050, 'velocity': 12e5}
coil_1 = self._COIL( radius = .05, current = parameters['coil_current'], z_pos = 0.0 )
coil_2 = self._COIL( radius = .05, current = -parameters['coil_current'], z_pos = 0.05)
coils = [coil_1.arr, coil_2.arr]
if (optimizer == 0):
self.OPTIMIZER = self.generic_simulation(num_particles = num_particles, e_gun_z = -.03, coil_current = parameters['coil_current'], c_charge = parameters['sp_charge'], injection_radius = parameters['injection_radius'], memory = 12000000)
self.OPTIMIZER.velocities[:,2] = parameters['velocity']
#sel f.OPTIMIZER.coils = [coils
self.OPTIMIZER.calculator = self._GPU(path_to_integrator, device_id)
self.conf_times_over_time = []
for i in range(100):
self.OPTIMIZER.c_spheres *= np.asarray([leap_factor, 1.0, 1.0, 1.0])
self.OPTIMIZER.coils[:,2] *= np.asarray([1.0, leap_factor, 1.0, 1.0]).repeat(2)
self.OPTIMIZER.positions[:,1] *= np.asarray([1.0, 1.0, leap_factor, 1.0])
self.OPTIMIZER.velocities[:,2] *= np.asarray([1.0, 1.0, 1.0, leap_factor])
self.OPTIMIZER.r_vals = self.OPTIMIZER.calculator.execute(self.OPTIMIZER, quiet=True)
self.OPTIMIZER.get_conf_times()
#self.OPTIMIZER.graph_conf_times(markersize = 10)
best_run = np.argmax(self.OPTIMIZER.conf_times)
if best_run == 0:
#print "Raised sp_charge: " + str(self.OPTIMIZER.)
self.OPTIMIZER.coils[:,2] *= np.asarray([1.0, 1.0/leap_factor, 1.0, 1.0]).repeat(2);self.OPTIMIZER.positions[:,1] *= np.asarray([1.0, 1.0, 1.0/leap_factor, 1.0]);self.OPTIMIZER.velocities[:,2] *= np.asarray([1.0, 1.0, 1.0, 1.0/leap_factor])
self.OPTIMIZER.c_spheres =self.OPTIMIZER.c_spheres[0].repeat(4)
if best_run == 1:
self.OPTIMIZER.c_spheres *= np.asarray([1.0/leap_factor, 1.0, 1.0, 1.0]);self.OPTIMIZER.positions[:,1] *= np.asarray([1.0, 1.0, 1.0/leap_factor, 1.0]);self.OPTIMIZER.velocities[:,2] *= np.asarray([1.0, 1.0, 1.0, 1.0/leap_factor])
self.OPTIMIZER.coils[:,2] = np.tile(self.OPTIMIZER.coils[:,2][2:4].reshape(2,1), (4,1)).reshape(8)
if best_run == 2:
self.OPTIMIZER.c_spheres *= np.asarray([1.0/leap_factor, 1.0, 1.0, 1.0]); self.OPTIMIZER.coils[:,2] *= np.asarray([1.0, 1.0/leap_factor, 1.0, 1.0]).repeat(2);self.OPTIMIZER.velocities[:,2] *= np.asarray([1.0, 1.0, 1.0, 1.0/leap_factor])
self.OPTIMIZER.positions[:,1] = self.OPTIMIZER.positions[:,1][2].repeat(4)
if best_run == 3:
self.OPTIMIZER.c_spheres *= np.asarray([1.0/leap_factor, 1.0, 1.0, 1.0]); self.OPTIMIZER.coils[:,2] *= np.asarray([1.0, 1.0/leap_factor, 1.0, 1.0]).repeat(2);self.OPTIMIZER.positions[:,1] *= np.asarray([1.0, 1.0, 1.0/leap_factor, 1.0]);
self.OPTIMIZER.velocities[:,2] = self.OPTIMIZER.velocities[:,2][3].repeat(4)
self.conf_times_over_time.append(np.max(self.OPTIMIZER.conf_times))
print "Stepped: " + str(i) + " | Max Time: " + str(np.max(self.OPTIMIZER.conf_times)) + " Best_run = "+str(best_run)
self.OPTIMIZER.graph_conf_times(markersize = 10)
self.OPTIMIZER.graph_trajectory(best_run)
# now have a simulation with 4 particles, initial charge, current, velocity
#def generic_simulation(self, num_particles = 10000, steps = 9000000, egun_energy = 1000, coil_current = 5000, e_gun_z = -.03, c_charge = 0.0):
#path_to_integrator = '/Users/jonkelley/Desktop/temp_potentia/potential_optimizer/part1.cl'
#z.dim_by_dim()
#z.single_sim()
#z.EGUNvsDIST()
#z.single_sim()
import os
script_path = os.path.abspath(__file__) # i.e. /path/to/dir/foobar.py
script_dir = os.path.split(script_path)[0] #i.e. /path/to/dir/
rel_path = "part1.cl"
#rel_path = "trajectory_conf.cl"
path_to_integrator = os.path.join(script_dir, rel_path)
z = 0;
if __name__ == "__main__":
z = all()
simulations = {
'single':z.single_sim,
'gun_v_l':z.gun_v_l,
'r_v_E':z.r_v_E,
'egunE_v_CC':z.egunE_v_CC,
'crit_val_show':z.crit_val_show,
'active_optimizer':z.active_optimizer,
'paramspace_per_sc':z.paramspace_per_sc,
'paramspace_detailed':z.paramspace_detailed,
'paramspace_single':z.paramspace_single
}
if len(sys.argv) == 1:
# rel_path = 'part1.cl'
print "single sim"
z.single_sim(0)
else:
if sys.argv[1] == "active_optimizer":
if len(sys.argv) == 3:
simulations[sys.argv[1]](int(sys.argv[2]),optimizer = 0)
else:
simulations[sys.argv[1]](int(sys.argv[2]),sys.argv[3])
else:
simulations[sys.argv[1]](int(sys.argv[2]))
# hi
# %run potential_optimizer.py{'single'} {0}
sim = z
``` |
{
"source": "jkelleyrtp/FA19_POE_Final",
"score": 3
} |
#### File: src/realsteel/robot.py
```python
from multiprocessing import Pool, Process, Queue
import time
from enum import Enum
from realsteel.device import ROBOT_DEVICE, FAKE_DEVICE
from realsteel.visualizer import DEMO_VIS, ROBOT_VIS, FAKE_VIS
from realsteel.joint_input import CAMERA, KINECT, HYBRID
from realsteel.kinematic import KSOLVER
from realsteel.pathplanner import PATHPLANNER
from realsteel.kinematic import ArmJoints
# vis_mode = Enum('demo','dev','disabled')
class ROBOT:
"""
This code processes the launch args and sets up all the moving parts to get the REAL STEEL experience up and running.
    Nothing in this main loop should block, but rather push data around between threads. Everything should be async.
"""
def __init__(self, hardware_enabled = False, visualization_mode="disabled"):
# [1] Build the virtual robot from joints
# [2] Set up the camera/kinect inputs to dump raw joint angles
# [3] Set up the kinematic solver that takes raw joint angles and turns into robot angles
# [4] Set up the path planner that maps joint angles frames
# [5] Set up the physical robot interface
# [6] Set up the visualizer
# Process the user flags to process things like dev mode
self.hardware_enabled: bool = hardware_enabled
self.visualization_mode = visualization_mode
# [1] Build the robot from a URDF file
        # TODO: allow specifying a custom URDF
        self.human_positions: dict = None
        self.robot_positions: dict = None
        # self.initialize_robot_from_urdf(file="")
# [2] Set up the camera/kinect input device
self.joint_input = HYBRID()
# [3] Set up the kinematic solver
self.solver = KSOLVER()
# [4] Set up the path planner
self.planner = PATHPLANNER()
# [5] Set up the hardware device
if self.hardware_enabled:
self.device = ROBOT_DEVICE()
else:
self.device = FAKE_DEVICE()
# [6] Set up the visualizer
if self.visualization_mode == "dev":
self.visualizer = ROBOT_VIS(directory='robot/')
elif self.visualization_mode == "demo":
self.visualizer = DEMO_VIS()
else:
self.visualizer = FAKE_VIS()
def start(self):
self.main_loop()
def main_loop(self):
# Set up a shared queue to put human angles into
input_queue = Queue()
# Get the process for the input method and start it
input_proc = self.joint_input.launch(input_queue)
input_proc.start()
# Set up the device queue to push data into
device_queue = Queue()
        device_proc = self.device.launch(device_queue)
        device_proc.start()
        # Set the initial joint state
joints = {}
joint_angles = [1, 1]
while True:
            # Check if a new set of human joint inputs is ready
# if not input_queue.empty():
joints = input_queue.get()
if joints['Lwri']:
joint_angles = self.solver.solve(joints['Lwri']['pc'])
joint = ArmJoints(joint_angles[0], joint_angles[1], 0.0)
            device_queue.put(joint)  # multiprocessing.Queue exposes put(), not push()
``` |
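A hypothetical launch sketch, assuming the `realsteel` package and its camera/device backends are installed and importable; the flag values are illustrative:
```python
from realsteel.robot import ROBOT
# Run without hardware attached, using the dev visualizer directory 'robot/'
robot = ROBOT(hardware_enabled=False, visualization_mode="dev")
robot.start()  # blocks in main_loop(), pulling joints and pushing ArmJoints to the device queue
```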
{
"source": "jkellndorfer/qhub",
"score": 2
} |
#### File: qhub/cli/destroy.py
```python
import pathlib
import logging
from qhub.destroy import destroy_configuration
from qhub.schema import verify
from qhub.render import render_template
from qhub.utils import load_yaml
logger = logging.getLogger(__name__)
def create_destroy_subcommand(subparser):
subparser = subparser.add_parser("destroy")
subparser.add_argument("-c", "--config", help="qhub configuration", required=True)
subparser.add_argument("-o", "--output", default="./", help="output directory")
subparser.add_argument(
"--skip-remote-state-provision",
action="store_true",
help="Skip terraform state import and destroy",
)
subparser.add_argument(
"--disable-render",
action="store_true",
help="Disable auto-rendering before destroy",
)
subparser.add_argument(
"--full-only",
action="store_true",
help="Only carry out one full pass instead of targeted sections",
)
subparser.set_defaults(func=handle_destroy)
def handle_destroy(args):
config_filename = pathlib.Path(args.config)
if not config_filename.is_file():
raise ValueError(
f"passed in configuration filename={config_filename} must exist"
)
config = load_yaml(config_filename)
verify(config)
if not args.disable_render:
render_template(args.output, args.config, force=True)
destroy_configuration(
config,
args.skip_remote_state_provision,
args.full_only,
)
```
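A minimal sketch of driving `handle_destroy` directly with an `argparse.Namespace`, bypassing the CLI; the module path `qhub.cli.destroy` and the config filename are assumptions based on the file layout shown above:
```python
import argparse
from qhub.cli.destroy import handle_destroy
args = argparse.Namespace(
    config="qhub-config.yaml",  # illustrative path to an existing config
    output="./",
    skip_remote_state_provision=False,
    disable_render=False,
    full_only=False,
)
handle_destroy(args)
```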
#### File: qhub/qhub/initialize.py
```python
import os
import re
import string
import random
import secrets
import tempfile
import logging
import requests
from qhub.provider.oauth.auth0 import create_client
from qhub.provider.cicd import github
from qhub.provider import git
from qhub.provider.cloud import digital_ocean, azure_cloud
from qhub.utils import namestr_regex, qhub_image_tag, check_cloud_credentials
from .version import __version__
logger = logging.getLogger(__name__)
BASE_CONFIGURATION = {
"project_name": None,
"provider": None,
"domain": None,
"certificate": {
"type": "self-signed",
},
"security": {
"authentication": None,
},
"default_images": {
"jupyterhub": f"quansight/qhub-jupyterhub:{qhub_image_tag}",
"jupyterlab": f"quansight/qhub-jupyterlab:{qhub_image_tag}",
"dask_worker": f"quansight/qhub-dask-worker:{qhub_image_tag}",
"dask_gateway": f"quansight/qhub-dask-gateway:{qhub_image_tag}",
"conda_store": f"quansight/qhub-conda-store:{qhub_image_tag}",
},
"storage": {"conda_store": "60Gi", "shared_filesystem": "100Gi"},
"theme": {
"jupyterhub": {
"hub_title": None,
"hub_subtitle": None,
"welcome": None,
"logo": "/hub/custom/images/jupyter_qhub_logo.svg",
"primary_color": "#4f4173",
"secondary_color": "#957da6",
"accent_color": "#32C574",
"text_color": "#111111",
"h1_color": "#652e8e",
"h2_color": "#652e8e",
}
},
"helm_extensions": [],
"monitoring": {
"enabled": True,
},
"cdsdashboards": {
"enabled": True,
"cds_hide_user_named_servers": True,
"cds_hide_user_dashboard_servers": False,
},
}
CICD_CONFIGURATION = {"type": "PLACEHOLDER", "branch": "main"}
AUTH_PASSWORD = {
"type": "password",
}
AUTH_OAUTH_GITHUB = {
"type": "GitHub",
"config": {
"client_id": "PLACEHOLDER",
"client_secret": "PLACEHOLDER",
},
}
AUTH_OAUTH_AUTH0 = {
"type": "Auth0",
"config": {
"client_id": "PLACEHOLDER",
"client_secret": "PLACEHOLDER",
"auth0_subdomain": "PLACEHOLDER",
},
}
LOCAL = {
"node_selectors": {
"general": {
"key": "kubernetes.io/os",
"value": "linux",
},
"user": {
"key": "kubernetes.io/os",
"value": "linux",
},
"worker": {
"key": "kubernetes.io/os",
"value": "linux",
},
}
}
DIGITAL_OCEAN = {
"region": "nyc3",
"kubernetes_version": "PLACEHOLDER",
"node_groups": {
"general": {"instance": "g-4vcpu-16gb", "min_nodes": 1, "max_nodes": 1},
"user": {"instance": "g-2vcpu-8gb", "min_nodes": 1, "max_nodes": 5},
"worker": {"instance": "g-2vcpu-8gb", "min_nodes": 1, "max_nodes": 5},
},
}
# Digital Ocean image slugs are listed here https://slugs.do-api.dev/
GOOGLE_PLATFORM = {
"project": "PLACEHOLDER",
"region": "us-central1",
"kubernetes_version": "1.18.16-gke.502",
"node_groups": {
"general": {"instance": "n1-standard-4", "min_nodes": 1, "max_nodes": 1},
"user": {"instance": "n1-standard-2", "min_nodes": 0, "max_nodes": 5},
"worker": {"instance": "n1-standard-2", "min_nodes": 0, "max_nodes": 5},
},
}
AZURE = {
"region": "Central US",
"kubernetes_version": "PLACEHOLDER",
"node_groups": {
"general": {
"instance": "Standard_D4_v3",
"min_nodes": 1,
"max_nodes": 1,
},
"user": {"instance": "Standard_D2_v2", "min_nodes": 0, "max_nodes": 5},
"worker": {
"instance": "Standard_D2_v2",
"min_nodes": 0,
"max_nodes": 5,
},
},
"storage_account_postfix": "".join(
random.choices("abcdefghijklmnopqrstuvwxyz0123456789", k=8)
),
}
AMAZON_WEB_SERVICES = {
"region": "us-west-2",
"kubernetes_version": "1.18",
"node_groups": {
"general": {"instance": "m5.xlarge", "min_nodes": 1, "max_nodes": 1},
"user": {"instance": "m5.large", "min_nodes": 1, "max_nodes": 5},
"worker": {"instance": "m5.large", "min_nodes": 1, "max_nodes": 5},
},
}
DEFAULT_PROFILES = {
"jupyterlab": [
{
"display_name": "Small Instance",
"description": "Stable environment with 1 cpu / 4 GB ram",
"default": True,
"kubespawner_override": {
"cpu_limit": 1,
"cpu_guarantee": 0.75,
"mem_limit": "4G",
"mem_guarantee": "2.5G",
"image": f"quansight/qhub-jupyterlab:{qhub_image_tag}",
},
},
{
"display_name": "Medium Instance",
"description": "Stable environment with 2 cpu / 8 GB ram",
"kubespawner_override": {
"cpu_limit": 2,
"cpu_guarantee": 1.5,
"mem_limit": "8G",
"mem_guarantee": "5G",
"image": f"quansight/qhub-jupyterlab:{qhub_image_tag}",
},
},
],
"dask_worker": {
"Small Worker": {
"worker_cores_limit": 1,
"worker_cores": 0.75,
"worker_memory_limit": "4G",
"worker_memory": "2.5G",
"worker_threads": 1,
"image": f"quansight/qhub-dask-worker:{qhub_image_tag}",
},
"Medium Worker": {
"worker_cores_limit": 2,
"worker_cores": 1.5,
"worker_memory_limit": "8G",
"worker_memory": "5G",
"worker_threads": 2,
"image": f"quansight/qhub-dask-worker:{qhub_image_tag}",
},
},
}
DEFAULT_ENVIRONMENTS = {
"environment-dask.yaml": {
"name": "dask",
"channels": ["conda-forge"],
"dependencies": [
"python",
"ipykernel",
"ipywidgets",
"qhub-dask ==0.3.13",
"python-graphviz",
"numpy",
"numba",
"pandas",
],
},
"environment-dashboard.yaml": {
"name": "dashboard",
"channels": ["conda-forge"],
"dependencies": [
"python==3.9.7",
"ipykernel==6.4.1",
"ipywidgets==7.6.5",
"qhub-dask==0.3.13",
"param==1.11.1",
"python-graphviz==0.17",
"matplotlib==3.4.3",
"panel==0.12.4",
"voila==0.2.16",
"streamlit==1.0.0",
"dash==2.0.0",
"cdsdashboards-singleuser==0.6.0",
],
},
}
def render_config(
project_name,
qhub_domain,
cloud_provider,
ci_provider,
repository,
auth_provider,
namespace=None,
repository_auto_provision=False,
auth_auto_provision=False,
terraform_state=None,
kubernetes_version=None,
disable_prompt=False,
ssl_cert_email=None,
):
config = BASE_CONFIGURATION
config["provider"] = cloud_provider
if ci_provider is not None and ci_provider != "none":
config["ci_cd"] = {"type": ci_provider, "branch": "main"}
if terraform_state is not None:
config["terraform_state"] = {"type": terraform_state}
config["theme"]["jupyterhub"]["hub_title"] = f"QHub - { project_name }"
config["theme"]["jupyterhub"][
"welcome"
] = f"""Welcome to { qhub_domain }. It is maintained by <a href="http://quansight.com">Quansight staff</a>. The hub's configuration is stored in a github repository based on <a href="https://github.com/Quansight/qhub/">https://github.com/Quansight/qhub/</a>. To provide feedback and report any technical problems, please use the <a href="https://github.com/Quansight/qhub/issues">github issue tracker</a>."""
if project_name is None and not disable_prompt:
project_name = input("Provide project name: ")
config["project_name"] = project_name
if not re.match(namestr_regex, project_name):
raise ValueError(
"project name should contain only letters and hyphens/underscores (but not at the start or end)"
)
if namespace is not None:
config["namespace"] = namespace
if not re.match(namestr_regex, namespace):
raise ValueError(
"namespace should contain only letters and hyphens/underscores (but not at the start or end)"
)
if qhub_domain is None and not disable_prompt:
qhub_domain = input("Provide domain: ")
config["domain"] = qhub_domain
config["qhub_version"] = __version__
# Generate default password for Keycloak root user and also example-user if using password auth
default_password = "".join(
secrets.choice(string.ascii_letters + string.digits) for i in range(16)
)
# Save default password to file
default_password_filename = os.path.join(
tempfile.gettempdir(), "QHUB_DEFAULT_PASSWORD"
)
with open(default_password_filename, "w") as f:
f.write(default_password)
os.chmod(default_password_filename, 0o700)
print(
f"Securely generated default random password={<PASSWORD>} for Keycloak root user stored at path={default_password_filename}"
)
if auth_provider == "github":
config["security"]["authentication"] = AUTH_OAUTH_GITHUB
print(
"Visit https://github.com/settings/developers and create oauth application"
)
print(f" set the homepage to: https://{qhub_domain}/")
print(
f" set the callback_url to: https://{qhub_domain}/auth/realms/qhub/broker/github/endpoint"
)
if not disable_prompt:
config["security"]["authentication"]["config"]["client_id"] = input(
"Github client_id: "
)
config["security"]["authentication"]["config"]["client_secret"] = input(
"Github client_secret: "
)
elif auth_provider == "auth0":
config["security"]["authentication"] = AUTH_OAUTH_AUTH0
elif auth_provider == "password":
config["security"]["authentication"] = AUTH_PASSWORD
# Always use default password for keycloak root
config["security"].setdefault("keycloak", {})[
"initial_root_password"
] = default_password
if cloud_provider == "do":
config["theme"]["jupyterhub"][
"hub_subtitle"
] = "Autoscaling Compute Environment on Digital Ocean"
config["digital_ocean"] = DIGITAL_OCEAN
if kubernetes_version:
config["digital_ocean"]["kubernetes_version"] = kubernetes_version
else:
            # The first kubernetes version returned by the Digital Ocean API is
            # the newest supported version. This field needs to be filled
            # dynamically since Digital Ocean updates the available versions
            # so frequently.
config["digital_ocean"][
"kubernetes_version"
] = digital_ocean.kubernetes_versions()[0]["slug"]
elif cloud_provider == "gcp":
config["theme"]["jupyterhub"][
"hub_subtitle"
] = "Autoscaling Compute Environment on Google Cloud Platform"
config["google_cloud_platform"] = GOOGLE_PLATFORM
if kubernetes_version:
config["google_cloud_platform"]["kubernetes_version"] = kubernetes_version
if "PROJECT_ID" in os.environ:
config["google_cloud_platform"]["project"] = os.environ["PROJECT_ID"]
elif not disable_prompt:
config["google_cloud_platform"]["project"] = input(
"Enter Google Cloud Platform Project ID: "
)
elif cloud_provider == "azure":
config["theme"]["jupyterhub"][
"hub_subtitle"
] = "Autoscaling Compute Environment on Azure"
config["azure"] = AZURE
if kubernetes_version:
config["azure"]["kubernetes_version"] = kubernetes_version
else:
            # The first kubernetes version returned by the Azure SDK is
            # the newest supported version. This field needs to be filled
            # dynamically since Azure updates the available versions
            # so frequently.
config["azure"]["kubernetes_version"] = azure_cloud.kubernetes_versions(
config["azure"]["region"]
)[0]
elif cloud_provider == "aws":
config["theme"]["jupyterhub"][
"hub_subtitle"
] = "Autoscaling Compute Environment on Amazon Web Services"
config["amazon_web_services"] = AMAZON_WEB_SERVICES
if kubernetes_version:
config["amazon_web_services"]["kubernetes_version"] = kubernetes_version
if "AWS_DEFAULT_REGION" in os.environ:
config["amazon_web_services"]["region"] = os.environ["AWS_DEFAULT_REGION"]
elif cloud_provider == "local":
config["theme"]["jupyterhub"][
"hub_subtitle"
] = "Autoscaling Compute Environment"
config["local"] = LOCAL
config["profiles"] = DEFAULT_PROFILES
config["environments"] = DEFAULT_ENVIRONMENTS
if ssl_cert_email is not None:
if not re.match("^[^ @]+@[^ @]+\\.[^ @]+$", ssl_cert_email):
raise ValueError("ssl-cert-email should be a valid email address")
config["certificate"] = {
"type": "lets-encrypt",
"acme_email": ssl_cert_email,
"acme_server": "https://acme-v02.api.letsencrypt.org/directory",
}
if auth_auto_provision:
if auth_provider == "auth0":
auth0_auto_provision(config)
if repository_auto_provision:
GITHUB_REGEX = "(https://)?github.com/([^/]+)/([^/]+)/?"
if re.search(GITHUB_REGEX, repository):
match = re.search(GITHUB_REGEX, repository)
git_repository = github_auto_provision(
config, match.group(2), match.group(3)
)
git_repository_initialize(git_repository)
else:
raise ValueError(
f"Repository to be auto-provisioned is not the full URL of a GitHub repo: {repository}"
)
return config
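# Hedged sketch (added; "_split_github_url" is a hypothetical helper, not part
# of the original module): shows how the GITHUB_REGEX used above resolves a
# repository URL into its owner and repo components.
def _split_github_url(repository):
    match = re.search("(https://)?github.com/([^/]+)/([^/]+)/?", repository)
    return (match.group(2), match.group(3)) if match else None
# _split_github_url("https://github.com/example-org/example-repo")
# -> ("example-org", "example-repo")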
def github_auto_provision(config, owner, repo):
check_cloud_credentials(
config
) # We may need env vars such as AWS_ACCESS_KEY_ID depending on provider
already_exists = True
try:
github.get_repository(owner, repo)
except requests.exceptions.HTTPError:
# repo not found
already_exists = False
if not already_exists:
try:
github.create_repository(
owner,
repo,
description=f'QHub {config["project_name"]}-{config["provider"]}',
homepage=f'https://{config["domain"]}',
)
except requests.exceptions.HTTPError as he:
raise ValueError(
f"Unable to create GitHub repo https://github.com/{owner}/{repo} - error message from GitHub is: {he}"
)
else:
logger.warn(f"GitHub repo https://github.com/{owner}/{repo} already exists")
try:
# Secrets
if config["provider"] == "do":
for name in {
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"SPACES_ACCESS_KEY_ID",
"SPACES_SECRET_ACCESS_KEY",
"DIGITALOCEAN_TOKEN",
}:
github.update_secret(owner, repo, name, os.environ[name])
elif config["provider"] == "aws":
for name in {
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
}:
github.update_secret(owner, repo, name, os.environ[name])
elif config["provider"] == "gcp":
github.update_secret(owner, repo, "PROJECT_ID", os.environ["PROJECT_ID"])
with open(os.environ["GOOGLE_CREDENTIALS"]) as f:
github.update_secret(owner, repo, "GOOGLE_CREDENTIALS", f.read())
elif config["provider"] == "azure":
for name in {
"ARM_CLIENT_ID",
"ARM_CLIENT_SECRET",
"ARM_SUBSCRIPTION_ID",
"ARM_TENANT_ID",
}:
github.update_secret(owner, repo, name, os.environ[name])
github.update_secret(
owner, repo, "REPOSITORY_ACCESS_TOKEN", os.environ["GITHUB_TOKEN"]
)
except requests.exceptions.HTTPError as he:
raise ValueError(
f"Unable to set Secrets on GitHub repo https://github.com/{owner}/{repo} - error message from GitHub is: {he}"
)
return f"[email protected]:{owner}/{repo}.git"
def git_repository_initialize(git_repository):
if not git.is_git_repo("./"):
git.initialize_git("./")
git.add_git_remote(git_repository, path="./", remote_name="origin")
def auth0_auto_provision(config):
auth0_config = create_client(config["domain"], config["project_name"])
config["security"]["authentication"]["config"]["client_id"] = auth0_config[
"client_id"
]
config["security"]["authentication"]["config"]["client_secret"] = auth0_config[
"client_secret"
]
config["security"]["authentication"]["config"]["auth0_subdomain"] = auth0_config[
"auth0_subdomain"
]
``` |
{
"source": "jkelowitt/GenStrIde",
"score": 3
} |
#### File: GenStrIde/utils/DataContainer.py
```python
import numpy as np
import random
import sys
class data_t(object):
def __init__(self, data, labels=None):
self.labels = labels
self.data = data
self.num_examples = data.shape[0]
def next_batch(self, batch_size, index):
idx = index * batch_size
n_idx = index * batch_size + batch_size
return self.data[idx:n_idx, :], self.labels[idx:n_idx, :]
# expects a numpy array of data and a corresponding numpy array of labels
# samples on the rows, features on the columns
class DataContainer:
def __init__(self, data, labels, train_split=0.8, test_split=0.2):
assert(data.shape[0] == labels.shape[0])
self.num_classes = labels.shape[1]
self.class_counts = {}
self.train, self.test = self.partition(data, labels, train_split, test_split)
# Shuffle training dataset (when creating dataset)
def shuffle_and_transform(self, data, labels):
stacked_d = np.vstack(data)
stacked_l = np.vstack(labels)
samples = random.sample(range(stacked_d.shape[0]),stacked_d.shape[0])
# apply the shuffled sample order to data and labels
stacked_d = stacked_d[samples]
stacked_l = stacked_l[samples]
return data_t(stacked_d, stacked_l)
# Shuffle training dataset (in between epochs)
def shuffle(self):
idxs = np.arange(self.train.data.shape[0])
np.random.shuffle(idxs)
self.train.data = np.squeeze(self.train.data[idxs])
self.train.labels = np.squeeze(self.train.labels[idxs])
def partition(self, data, labels, train_split=0.8, test_split=0.2):
x_train = []
y_train = []
x_test = []
y_test = []
for i in range(self.num_classes):
# find where the labels are equal to the certain class
idxs = np.where(np.argmax(labels, axis=1) == i)[0]
np.random.shuffle(idxs)
# record the class count information
self.class_counts[str(i)] = idxs.shape[0]
# get the int that splits the train/test sets
split = int(train_split * idxs.shape[0])
# append class data to respective lists
x_train.append(data[idxs[:split]])
y_train.append(labels[idxs[:split]])
x_test.append(data[idxs[split:]])
y_test.append(labels[idxs[split:]])
# format into datacontainer
train = self.shuffle_and_transform(x_train, y_train)
test = self.shuffle_and_transform(x_test, y_test)
return [train, test]
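# Minimal usage sketch (added, not part of the original GenStrIde code; the
# random data and one-hot labels below are made up for illustration): build a
# DataContainer and pull one training batch.
if __name__ == "__main__":
    example_data = np.random.rand(100, 8)          # 100 samples, 8 features
    example_labels = np.eye(4)[np.arange(100) % 4]  # one-hot labels, 4 classes
    container = DataContainer(example_data, example_labels, train_split=0.8)
    batch_x, batch_y = container.train.next_batch(batch_size=10, index=0)
    print(batch_x.shape, batch_y.shape)  # -> (10, 8) (10, 4)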
``` |
{
"source": "jkelowitt/t-builder",
"score": 3
} |
#### File: jkelowitt/t-builder/tests.py
```python
from unittest import TestCase, main
from titration_class import array, Titration
from compounds import strong_acids, strong_bases, weak_acids, weak_bases
class TestTitrationClassModule(TestCase):
def setUp(self):
self.titrations = []
for t in strong_acids:
for a in weak_bases:
self.titrations.append(
Titration(
analyte=a, titrant=t, volume_analyte=25, concentration_titrant=0.10, concentration_analyte=0.10
)
)
for t in strong_bases:
for a in weak_acids:
self.titrations.append(
Titration(
analyte=a, titrant=t, volume_analyte=25, concentration_titrant=0.10, concentration_analyte=0.10
)
)
"""First Derivative Tests"""
def test_first_derivative_can_be_made(self):
for titration in self.titrations:
self.assertIsNotNone(titration.deriv(1))
def test_first_derivative_volume_size_is_correct(self):
for titration in self.titrations:
volume, derivative = array(titration.deriv(1))
self.assertEqual(len(volume), len(titration.ph_t))
"""Second Derivative Tests"""
def test_second_derivative_can_be_made(self):
for titration in self.titrations:
self.assertIsNotNone(titration.deriv(2))
def test_second_derivative_volume_size_is_correct(self):
for titration in self.titrations:
volume, derivative = array(titration.deriv(2))
self.assertEqual(len(volume), len(titration.volume_titrant_t))
# # """File Tests""" This takes too long. Involves the process of creating >450 MB of test_data. Consolidate later.
#
# def test_titration_file_can_be_made(self):
# for titration in self.titrations:
# titration.write_titration_data()
# self.assertTrue(path.exists("Titration Curve Data.csv"))
# remove("Titration Curve Data.csv")
#
# def test_relative_species_file_can_be_made(self):
# for titration in self.titrations:
# titration.write_alpha_data()
# self.assertTrue(path.exists("Alpha Value Data.csv"))
# remove("Alpha Value Data.csv")
#
# def test_titration_file_has_data(self):
# for titration in self.titrations:
# titration.write_titration_data(file_headers=True)
# data = read_csv("Titration Curve Data.csv")
# self.assertIsNotNone(data.head())
# remove("Titration Curve Data.csv")
#
# def test_relative_species_file_has_data(self):
# for titration in self.titrations:
# titration.write_alpha_data(file_headers=True)
# data = read_csv("Alpha Value Data.csv")
# self.assertIsNotNone(data.head())
# remove("Alpha Value Data.csv")
#
# def test_titration_file_has_correct_data(self):
# for titration in self.titrations:
# titration.write_titration_data(file_headers=True)
# data = read_csv("Titration Curve Data.csv")
# check = read_csv(
# f"test_data/{titration.analyte.name}_{titration.titrant.name}_titration_data.csv".replace(" ",
# "_").lower()
# )
# self.assertDictEqual(data.to_dict(), check.to_dict())
#
# remove("Titration Curve Data.csv")
#
# def test_relative_species_file_has_correct_data(self):
# for titration in self.titrations:
# titration.write_alpha_data(file_headers=True)
# data = read_csv("Alpha Value Data.csv")
# check = read_csv(
# f"test_data/{titration.analyte.name}_{titration.titrant.name}_alpha_data.csv".replace(" ", "_").lower())
# self.assertDictEqual(data.head().to_dict(), check.head().to_dict())
#
# remove("Alpha Value Data.csv")
#
# def test_analysis_file_has_correct_data(self):
# for titration in self.titrations:
# titration.write_analysis_data(file_headers=True)
# data = read_csv("Analysis Data.csv")
# check = read_csv(
# f"test_data/{titration.analyte.name}_{titration.titrant.name}_analysis_data.csv".replace(" ",
# "_").lower()
# )
#
# self.assertDictEqual(data.head(2).to_dict(), check.head(2).to_dict())
#
# remove("Analysis Data.csv")
"""Value checks"""
def test_values_have_same_length(self):
for titration in self.titrations:
untrimmed = [
titration.volume_titrant,
titration.ph,
titration.hydronium,
titration.hydroxide,
titration.alpha_analyte,
]
length = len(untrimmed[0])
for value in untrimmed:
self.assertEqual(
len(value),
length,
)
def test_trimmed_values_have_same_length(self):
for titration in self.titrations:
trimmed = titration.trim_values(
titration.volume_titrant,
titration.ph,
titration.hydronium,
titration.hydroxide,
titration.alpha_analyte,
)
length = len(next(trimmed))
for value in trimmed:
self.assertEqual(len(value), length)
def test_trimmed_values_have_less_values_than_untrimmed_values(self):
for titration in self.titrations:
trimmed = titration.trim_values(
titration.volume_titrant,
titration.ph,
titration.hydronium,
titration.hydroxide,
titration.alpha_analyte,
)
untrimmed = [
titration.volume_titrant,
titration.ph,
titration.hydronium,
titration.hydroxide,
titration.alpha_analyte,
]
for trim in trimmed:
for untrim in untrimmed:
self.assertTrue(len(trim) <= len(untrim))
"""Functions Check"""
def test_scaled_data_less_than_one(self):
for titration in self.titrations:
scaled = titration._scale_data(titration.ph, 1)
for scale in scaled:
self.assertTrue(scale <= 1)
def test_alpha_index_scaling(self):
test_list = array([[5, 4, 3, 2, 1] for _ in range(200)])
f = self.titrations[0].scale_alphas(test_list)
for sl in f:
self.assertSequenceEqual(list(sl), [0, 4, 6, 6, 4])
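# Note (added for clarity, not part of the original suite): the tests can be run
# with `python tests.py` or `python -m unittest tests -v`; every case iterates
# over each strong/weak acid-base pairing built in setUp.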
if __name__ == "__main__":
main()
``` |
{
"source": "jkelowitt/titration-generator",
"score": 3
} |
#### File: jkelowitt/titration-generator/T-Builder.py
```python
from webbrowser import open
from dearpygui.core import *
from dearpygui.simple import *
import data_writing as dw
from titration_class import Compound, Titration
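# Note (added for context, not in the original source): dearpygui.core and
# dearpygui.simple are the pre-1.0 DearPyGui module layout, so this script
# assumes that older API (add_line_series, set_plot_xlimits, etc.).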
__author__ = "jkelowitt"
__version__ = "v2.3.4"
__license__ = "MIT"
plot_width = 905
plot_height = 755
data_width = 200
def open_link(sender, data):
open("https://github.com/jkelowitt/t-builder", new=2)
def query(sender, data):
set_plot_xlimits(sender, data[0], data[1])
set_plot_ylimits(sender, data[2], data[3])
def make_titration():
# Create compounds
Analyte = Compound(
name=get_value("Analyte Name"),
acidic=get_value("aa"),
pKas=[float(i) for i in get_value("apk").split(",")],
)
Titrant = Compound(
name=get_value("tname"),
acidic=not get_value("aa"),
pKas=[float(i) for i in get_value("tpk").split(",")],
)
# Create titration object
titr = Titration(
analyte=Analyte,
titrant=Titrant,
concentration_analyte=get_value("aconc"),
concentration_titrant=get_value("tconc"),
volume_analyte=get_value("avol"),
decimal_places=get_value("precision"),
temp=get_value("temperature"),
)
return titr
def plot_callback(sender, data):
clear_plot("Main Plot")
# Band-aid fix to issue #10. Everything which needs to be kept in a certain range must have callback_data
try:
value = get_value(sender)
if value < data[0]:
set_value(sender, data[0])
clear_plot("Main Plot")
return
except: # If the widget doesn't give you a number to check, don't check its value.
pass
titr = make_titration()
if sender == "b_tab_button": # Everything but the buffer tab button
# Perform bjerrum calculations
bx = list(titr.ph)
bys = [list(alpha) for alpha in titr.alpha_analyte.T]
# For every alpha value list, plot the alpha values at every pH and add the line to the plot
for num, alpha in enumerate(bys):
add_line_series(plot="Main Plot", name=f"species{num}", x=bx, y=alpha, weight=2)
# Relabel the plot, and x and y axes
configure_item("Main Plot", label="Speciation Plot", x_axis_name="pH", y_axis_name="Relative Speciation")
else:
# Relabel the plot, and x and y axes
configure_item("Main Plot", label="Titration", x_axis_name="Volume (mL)", y_axis_name="pH")
# Perform titration calculations
tx = list(titr.volume_titrant_t)
ty = list(titr.ph_t)
# plot the calculations
add_line_series(
plot="Main Plot",
name="Titration Curve",
x=tx,
y=ty,
weight=2,
color=[0, 255, 255, 255], # Cyan
)
if get_value("buffer_regions"):
vols, pHs = titr.find_buffer_points()
vols = list(vols)
# It's impossible to have a pH > 14 in water, don't find the buffers for a solution which cannot exist.
pHs = [x for x in pHs if x < 14]
add_scatter_series(
plot="Main Plot",
name="Buffer Points",
x=vols,
y=pHs,
fill=[255, 0, 0, 255], # Red
outline=[255, 0, 0, 255], # Red
weight=2,
)
# Add labels to the volumes of each point
for vol, pH in zip(vols, pHs):
if titr.analyte.acidic:
add_annotation("Main Plot", x=vol, y=pH, text=f"{vol:.5g} mL", xoffset=5, yoffset=5)
else:
# Annotations need to be above the line if the solution is basic to prevent the line from clipping
add_annotation(
"Main Plot",
x=vol,
y=pH,
text=f"{vol:.5g} mL",
xoffset=5,
yoffset=-5,
)
if get_value("equiv"):
vols, pHs = titr.find_equiv_points()
vols = list(vols)
pHs = list(pHs)
add_scatter_series(
plot="Main Plot",
name="Equivalence Points",
x=vols,
y=pHs,
fill=[0, 255, 0, 255], # Green
outline=[0, 255, 0, 255], # Green
weight=2,
)
# Add labels to the volumes of each point
for vol, pH in zip(vols, pHs):
if titr.analyte.acidic:
add_annotation("Main Plot", x=vol, y=pH, text=f"{vol:.5g} mL", xoffset=5, yoffset=5)
else:
# Annotations need to be above the line if the solution is basic to prevent the line from clipping
add_annotation(
"Main Plot",
x=vol,
y=pH,
text=f"{vol:.5g} mL",
xoffset=5,
yoffset=-5,
)
if get_value("1stderiv"):
volume, pHderiv = titr.deriv(degree=1)
data = titr._scale_data(pHderiv, get_value("1dscaler"))
add_line_series(
plot="Main Plot",
name="First Derivative",
x=list(volume),
y=list(data),
weight=2,
color=[255, 0, 255, 255], # Purple
)
if get_value("2ndderiv"):
volume, pHderiv = titr.deriv(degree=2)
data = titr._scale_data(pHderiv, get_value("2dscaler"))
add_line_series(
plot="Main Plot",
name="Second Derivative",
x=list(volume),
y=list(data),
weight=2,
color=[255, 255, 0, 255], # Yellow
)
def save_titr_data(sender, data):
titr = make_titration()
title = f"{get_value('aname')}_{get_value('tname')}_titration".replace(" ", "_")
dw.write_titration_data(titr, title=title)
def save_bjer_data(sender, data):
titr = make_titration()
title = f"{get_value('aname')}_{get_value('tname')}_species".replace(" ", "_")
dw.write_alpha_data(titr, title=title)
def save_ana_data(sender, data):
titr = make_titration()
title = f"{get_value('aname')}_{get_value('tname')}_analysis".replace(" ", "_")
dw.write_analysis_data(titr, title=title)
# Main gui formatting
with window("Main Window", label="Something Else", autosize=True):
set_main_window_size(width=1270, height=850)
with tab_bar("Main Tab bar"):
add_tab_button("t_tab_button", label="Titration", callback=plot_callback)
add_tab_button("b_tab_button", label="Speciation", callback=plot_callback)
add_same_line()
add_text(" " * 127) # TODO ask for right adjust text on DPG
add_same_line()
add_button("T-Builder by Jkelowitt", callback=open_link, tip="Open in github in browser")
with group("Data Entry", width=data_width):
add_text("Analyte Data")
add_input_text(
"aname",
label="Name",
default_value="Citric Acid",
callback=plot_callback,
tip="Enter the name of the analyte. This is used when making the data files.",
)
add_input_float(
"aconc",
label="Concentration (M)",
default_value=0.10,
callback=plot_callback,
callback_data=[0],
tip="Enter the concentration of the analyte in molarity.",
)
add_input_text(
"apk",
label="pKa value(s)",
default_value="3.13, 4.76, 6.40",
callback=plot_callback,
tip="Enter the pKa values of the analyte. Separate them with commas if there are more than one.",
)
add_input_float(
"avol",
label="Volume (mL)",
default_value=25,
callback=plot_callback,
callback_data=[0],
tip="Enter the volume of the analyte in mL.",
)
add_checkbox(
"aa",
label="Acidic",
default_value=True,
callback=plot_callback,
tip="Check this box if the analyte acts as an acid during this titration.",
) # TODO This may be automatable. Think "if pKa_a > pKa_t, then..."
add_dummy(height=25)
add_text("Titrant Data")
add_input_text(
"tname",
label="Name",
default_value="KOH",
callback=plot_callback,
tip="Enter the name of the titrant. This is used when naming the data files.",
)
add_input_float(
"tconc",
label="Concentration (M)",
default_value=0.10,
callback=plot_callback,
callback_data=[0],
tip="Enter the concentration of the titrant in molarity.",
)
add_input_text(
"tpk",
label="pKa value(s)",
default_value="14.76",
callback=plot_callback,
tip="Enter the pKa values of the titrant. Separate them with commas if there are more than one.",
)
add_dummy(height=25)
add_text("Titration Settings")
add_input_int(
"precision",
label="Number of Points",
default_value=2,
callback=plot_callback,
tip="The number of pH points to calculate. (10^n items)",
width=65,
)
add_input_int(
"temperature",
label="Temperature (C)",
default_value=25,
callback=plot_callback,
tip="The temperature at which the titration occurs. (0 - 100 C)",
width=65,
)
add_dummy(height=25)
add_text("Perform Titration Analysis")
add_checkbox(
"buffer_regions",
label="Show Buffering Points",
default_value=False,
callback=plot_callback,
tip="Show the center of the buffering regions on the Titration plot.",
)
add_checkbox(
"equiv",
label="Show Equivalence Points",
default_value=False,
callback=plot_callback,
tip="Show the equivalence points on the Titration plot.",
)
add_checkbox(
"1stderiv",
label="Show normalized y'",
default_value=False,
callback=plot_callback,
tip="Show the normalized 1st Derivative of the Titration plot",
)
add_checkbox(
"2ndderiv",
label="Show normalized y''",
default_value=False,
callback=plot_callback,
tip="Show the normalized 2nd Derivative of the Titration plot.",
)
add_drag_float(
"1dscaler",
label="Scale y'",
default_value=8,
min_value=1,
speed=0.1,
width=80,
format="%0.2f",
callback=plot_callback,
tip="Scale the 1st Derivative of the Titration plot.",
)
add_drag_float(
"2dscaler",
label="Scale y''",
default_value=2,
min_value=1,
speed=0.1,
width=80,
format="%0.2f",
callback=plot_callback,
tip="Scale the 2nd Derivative of the Titration plot.",
)
add_dummy(height=25)
add_text("Save Data to CSV")
add_button("Save Titration Data ", callback=save_titr_data)
add_button("Save Speciation Data", callback=save_bjer_data)
add_button("Save Analysis Data", callback=save_ana_data)
# Put the titration curve under the data entry section
add_same_line()
with group("TitrationPlotGroup"):
add_plot(
"Main Plot",
label="Titration Curve",
query_callback=query,
width=plot_width,
height=plot_height,
anti_aliased=True,
x_axis_name="Volume (ml)",
y_axis_name="pH",
)
plot_callback("equiv", []) # Make the plots appear on program start
# Run the curve.
if __name__ == "__main__":
start_dearpygui(primary_window="Main Window")
```
#### File: jkelowitt/titration-generator/titration_class.py
```python
from dataclasses import dataclass, field
from typing import List, Tuple, Generator, Any
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as IUS
def pk_to_k(pk) -> np.array:
"""Convert pK values to K values"""
return np.array(10.0 ** (-np.array(pk)))
def closest_value(num: float, arr: np.array) -> float:
"""Returns the closest value to the number in the array."""
return min(arr, key=lambda x: np.abs(x - num))
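# Quick illustration (added): closest_value(4.3, np.array([1.0, 4.0, 5.0]))
# returns 4.0, and pk_to_k([4.76]) returns array([~1.74e-05]).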
@dataclass
class Compound:
"""
Main class used to contain information about a specific compound
Parameters
----------
name : A string which holds the name of the compound
acidic : A boolean which represents whether or not the compound is acidic. True --> Acidic, False -> Basic
pKas: A list of floats which represents the pKa values of the compound.
"""
name: str
acidic: bool
pKas: list[float]
def __post_init__(self):
# The k values can become zero if the pKa value is too large ~> 330.
self.ks: np.array = np.array(10.0 ** (-np.array(self.pKas)))
@dataclass
class Titration:
"""
Main Titration Class. Performs the titration of an analyte with a titrant
given their concentrations and volumes.
Parameters
----------
analyte : A Compound class which represents the analyte of the titration
titrant : A Compound class which represents the titrant of the titration
concentration_analyte : A float which represents the concentration of the analyte
concentration_titrant : A float which represents the concentration of the titrant
volume_analyte : A float which represents the volume of analyte being titrated
Optional Parameters
-------------------
pKw : A custom value for the pKw of water. Default is None.
temp : A custom temperature for the temperature for the titration to take place.
Default is 25C. If pKw is None, this value is used to calculate the pKw at 25C.
decimal_places : The number of decimal places the titration should be simulated to. Default is 2 (2 -> 0.01).
"""
analyte: Compound
titrant: Compound
concentration_analyte: float
concentration_titrant: float
volume_analyte: float
pKw: float = field(default=None)
temp: float = field(default=25)
decimal_places: int = field(default=2)
def __post_init__(self):
"""Important values to calculate after the initialization"""
# Calculate the pKw
if self.pKw is not None: # If given a pKw
self.kw = 10 ** (-self.pKw)
else: # If given a temperature
self.kw = 10 ** (-self.temp_kw(self.temp))
# The increment level for the value ranges
self.precision: int = 10 ** -self.decimal_places
# Value ranges
self.ph, self.hydronium, self.hydroxide = self.starting_phs()
# Calculate the alpha values for the compounds at each pH
self.alpha_analyte = self.alpha_values(k=self.analyte.ks, acid=self.analyte.acidic)
self.alpha_titrant = self.alpha_values(k=self.titrant.ks, acid=self.titrant.acidic)
# Calculate and trim the volumes.
self.volume_titrant, self.phi = self.calculate_volume(self.titrant.acidic)
self.ph_t, self.volume_titrant_t = self.trim_values(self.ph, self.volume_titrant)
def starting_phs(self, min_ph: float = None, max_ph: float = None) -> Tuple[np.array, np.array, np.array]:
"""Returns a range of pH, hydronium concentration, and hydroxide concentrations"""
if min_ph is None:
min_ph = (14 * (not self.analyte.acidic)) - np.log10(self.concentration_analyte)
if max_ph is None:
max_ph = (14 * (not self.titrant.acidic)) - np.log10(self.concentration_analyte)
if self.analyte.acidic:
ph = np.arange(min_ph, max_ph, self.precision)
else: # Swap max and min pH so that the proper volume order is preserved.
ph = np.arange(max_ph, min_ph, self.precision)
h = 10 ** (-ph)
oh = self.kw / h
return ph, h, oh
@staticmethod
def temp_kw(temp: float) -> float:
"""Returns the pKw of water given a certain temperature in celsius."""
# Quadratic approximation of the data for liquid water found here:
# https://www.engineeringtoolbox.com/ionization-dissociation-autoprotolysis-constant-pKw-water-heavy-deuterium-oxide-d_2004.html
# 0 <= T <= 95 C
# R^2 = 0.9992
a = 0.000128275
b = -0.0406144
c = 14.9368
pKw = (a * temp ** 2) + (b * temp) + c
return pKw
@staticmethod
def _scale_data(data: np.array, a: float) -> np.array:
"""abs normalization"""
return a * (data / (1 + np.abs(data)))
@staticmethod
def scale_alphas(arr: np.array) -> np.array:
"""Scale the alpha values by its index in the sub-array"""
new_arr = []
for num, a in enumerate(np.transpose(arr)):
a *= num
new_arr.append(a)
return np.transpose(np.array(new_arr))
def alpha_values(self, k: np.array, acid: bool = True) -> np.array:
"""Finds the fraction of solution which each species of compound takes up at each pH."""
# If the k values are for K_b, convert to K_a. --> K_1 = K_w / K_n , K_2 = K_w / K_(n-1)
if not acid:
k = self.kw / np.flip(k) # TODO results in a Div by Zero error if pKa is too large (>330)
# The functionality of an acid or base can be determined by the number of dissociation constants it has.
n = len(k)
# Get the values for the [H+]^n power
h_vals = np.array([self.hydronium ** i for i in range(n, -1, -1)])
# Get the products of the k values.
k_vals = [np.prod(k[0:x]) for x in range(n + 1)]
# Prod and Sum the h and k values
denoms_arr = np.transpose(h_vals) * k_vals # Product of the sub-elements of the denominator
denoms = np.sum(denoms_arr, axis=1) # Sum of the sub-elements of the denominator
# Do the outermost alpha value calculation
alphas = np.transpose(np.divide(np.transpose(denoms_arr), denoms)) # Divide and re-transpose
if acid:
return np.array(alphas)
return np.flip(alphas, axis=0)
def trim_values(self, *args: Any) -> Generator:
"""Returns the data ranges where the volume is non-trivial and non-absurd."""
# Go until you are 1 past the last sub-reaction.
limiter = len(self.analyte.pKas) + 1
good_val_index = np.where((self.phi >= [0]) & (self.phi <= [limiter]))
# Trim the values for every chosen data set
rets = (arg[good_val_index] for arg in args) # Add the trimmed dataset to the return variable
return rets
def calculate_volume(self, acid_titrant: bool) -> Tuple[List, List]:
"""Calculate the volume of titrant required to reach each pH value."""
# Alpha values scaled by their index
scaled_alphas_analyte = self.scale_alphas(self.alpha_analyte)
scaled_alphas_titrant = self.scale_alphas(self.alpha_titrant)
# Sum the scaled alpha values. Axis=1 forces the summation to occur for each individual [H+] value.
summed_scaled_alphas_analyte = np.sum(scaled_alphas_analyte, axis=1)
summed_scaled_alphas_titrant = np.sum(scaled_alphas_titrant, axis=1)
# I found this written as delta somewhere, and thus it will be named.
delta = self.hydronium - self.hydroxide
# Conditional addition or subtraction based on the titrant.
if acid_titrant:
numerator = summed_scaled_alphas_analyte + (delta / self.concentration_analyte)
denominator = summed_scaled_alphas_titrant - (delta / self.concentration_titrant)
else:
numerator = summed_scaled_alphas_analyte - (delta / self.concentration_analyte)
denominator = summed_scaled_alphas_titrant + (delta / self.concentration_titrant)
# Solve for the volume
phi = numerator / denominator
volume = phi * self.volume_analyte * self.concentration_analyte / self.concentration_titrant
return volume, phi
def find_buffer_points(self) -> Tuple[List[int], np.array]:
"""Find the volumes of the buffer points based on the pKa values."""
pH, volume = self.trim_values(self.ph, self.volume_titrant)
pKas = np.array(self.analyte.pKas)
# All the volumes where the pH equals pKa
volume_indices = []
for pKa in pKas:
if pKa > 14: # Should never be larger than 14
continue
places = np.where(pH == closest_value(pKa, pH))[0][0]
volume_indices.append(places)
return volume[volume_indices], pKas
def find_equiv_points(self) -> Tuple[List, List]:
"""Find the equivalence points based on the progression of the reaction."""
pH, volume, phi = self.trim_values(self.ph, self.volume_titrant, self.phi)
points = []
for i in range(1, len(self.analyte.pKas) + 1):
closest = closest_value(i, phi)
points.append(np.where(phi == closest)[0][0])
return list(volume[points]), list(pH[points])
def deriv(self, degree: int) -> Tuple[np.array, np.array]:
"""Find the n-th derivative"""
pH, volume = self.trim_values(self.ph, self.volume_titrant)
# An object which makes splines
spline_maker = IUS(volume, pH)
# An object which calculates the derivative of those splines
deriv_function = spline_maker.derivative(n=degree)
# Calculate the derivative at all of the splines
d = deriv_function(volume)
return volume, d
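# Usage sketch (added; the compounds and pKa values below are illustrative
# assumptions, not taken from the original module): titrate a weak acid with a
# strong base and report the first equivalence point.
if __name__ == "__main__":
    acetic_acid = Compound(name="Acetic acid", acidic=True, pKas=[4.76])
    naoh = Compound(name="NaOH", acidic=False, pKas=[13.8])
    titr = Titration(
        analyte=acetic_acid,
        titrant=naoh,
        concentration_analyte=0.10,
        concentration_titrant=0.10,
        volume_analyte=25,
    )
    volumes, phs = titr.find_equiv_points()
    print(f"First equivalence point near {volumes[0]:.1f} mL at pH {phs[0]:.2f}")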
``` |
{
"source": "jkeltner/district_profile",
"score": 3
} |
#### File: jkeltner/district_profile/census.py
```python
import logging
import httplib2
import json
from api_keys import CENSUS_API_KEY
# CHARTS
# This is a list of all the charts we have in an array with section names.
# First set of items is the section names
# Each item under a section is an array with four items:
# [Name, Description, Query Name, Annotations]
# The Query Name must match to a query item below.
# Annotations should be an array of String objects that should be displayed as
# annotations to the chart. This is usually due to shortening variable names.
census_charts = [
["Demographics" , [
["Age", "", "age", ["Please not that some bars are 5 year spans and others are 10 year spans."]],
["Ethnicity", "", "ethnicity",
["(1) Includes American Indians and Alaskan Natives",
"(2) Includes Hawaiians and other Pacific Islanders"]],
["Eduation", "Education attaitment of individuals 25 years of age and older.", "education", []],
["School Enrollment", "Portion of population 3 yrs and older enrolled in school.", "school_enrollment", []],
["Health Insurance", "Portion of the civilian population by health insurance type", "health_insurance", []]
]],
["Economics" , [
["Employment", "Employment status of individuals 16 years and older.", "employment", []],
["Income", "", "income", []],
["Industry", "", "industry",
["(1) Agriculture, forestry, fishing and hunting, and mining",
"(2) Transportation and warehousing, and utilities",
"(3) Finance and insurance, and real estate and rental and leasing",
"(4) Professional, scientific, and management, and administrative and waste management services",
"(5) Educational services, and health care and social assistance",
"(6) Arts, entertainment, and recreation, and accommodation and food services"
]],
["Occuption", "", "occupation", [
"(1) Management, business, science, and arts occupations",
"(2) Natural resources, construction, and maintenance occupations"
]],
["Class of Worker", "", "class_of_worker",
["(1) In own not incorporated business"]],
["Home Value", "Value of owner-occupied housing units", "home_value" ,[]],
["Rent", "Rental cost of occupied units", "rent" ,[]],
["Rent peer Income", "Gross rent as a percentage of gross income", "rent_per_income", []],
["Housing Vacancy", "Rate of vacancy by top of property", "housing_vacancy", []]
]]
]
# QUERIES
# This is the list of all queries for the charts.
# In each tuple, the first item is the label for the data point.
# The second item is the Census Bureau API variable name for the data point.
# URL to find variables: https://api.census.gov/data/2015/acs/acs1/profile/variables.html
#
queries = {
"age" : [
["Under 5" , "DP05_0004PE"],
["5 to 9" , "DP05_0005PE"],
["10 to 14" , "DP05_0006PE"],
["15 to 19" , "DP05_0007PE"],
["20 to 24" , "DP05_0008PE"],
["25 to 34" , "DP05_0009PE"],
["35 to 44" , "DP05_0010PE"],
["45 to 54" , "DP05_0011PE"],
["55 to 59" , "DP05_0012PE"],
["60 to 64" , "DP05_0013PE"],
["65 to 74" , "DP05_0014PE"],
["75 to 84" , "DP05_0015PE"],
["85+" , "DP05_0016PE"]
],
"class_of_worker" : [
["Private" , "DP03_0047PE"],
["Government" , "DP03_0048PE"],
["Self-employed (1)" , "DP03_0049PE"],
["Unpaid family workers" , "DP03_0050PE"]
],
"education" : [
["No HS" , "DP02_0059PE"],
["Some HS" , "DP02_0060PE"],
["HS Grad or GED" , "DP02_0061PE"],
["Some college" , "DP02_0062PE"],
["Associate's Degree" , "DP02_0063PE"],
["Bachelor's Degree" , "DP02_0064PE"],
["Grad or Prof Degree" , "DP02_0065PE"]
],
"employment" : [
["Employed Civilian" , "DP03_0004PE"],
["Employed Military" , "DP03_0006PE"],
["Unemployed" , "DP03_0005PE"],
["Not in Labor Force" , "DP03_0007PE"]
],
"ethnicity" : [
["White" , "DP05_0032PE"],
["Hispanic" , "DP05_0067PE"],
["Black or AA" , "DP05_0033PE"],
["Native American (1)" , "DP05_0034PE"],
["Asian" , "DP05_0039PE"],
["Pacific Islander (2)" , "DP05_0047PE"],
["Other" , "DP05_0052PE"],
["2+ Races " , "DP05_0030PE"]
],
"health_insurance" : [
["Private" , "DP03_0097PE"],
["Pulic" , "DP03_0098PE"],
["Uninsured" , "DP03_0099PE"]
],
"home_value" : [
["<$50K" , "DP04_0081PE"],
["$50-100K" , "DP04_0082PE"],
["$100-150K" , "DP04_0083PE"],
["$150-200K" , "DP04_0084PE"],
["$200-300K" , "DP04_0085PE"],
["$300-500K" , "DP04_0086PE"],
["$500K-1M" , "DP04_0087PE"],
["$1M+" , "DP04_0088PE"]
],
"housing_vacancy" : [
["Howeowner" , "DP04_0004E"],
["Rental" , "DP04_0005E"]
],
"income" : [
["Under $10K" , "DP03_0052PE"],
["$10-15K" , "DP03_0053PE"],
["$15-25K" , "DP03_0054PE"],
["$25-35K" , "DP03_0055PE"],
["$35-50K" , "DP03_0056PE"],
["$50-75K" , "DP03_0057PE"],
["$75-100K" , "DP03_0058PE"],
["$100-150K" , "DP03_0059PE"],
["$150-200K" , "DP03_0060PE"],
["$200K+" , "DP03_0061PE"]
],
"industry" : [
["Agriculture (1)" , "DP03_0033PE"],
["Construction" , "DP03_0034PE"],
["Manufacturing" , "DP03_0035PE"],
["Wholesale trade" , "DP03_0036PE"],
["Retail trade" , "DP03_0037PE"],
["Transportation (2)" , "DP03_0038PE"],
["Information" , "DP03_0039PE"],
["Finance + Real Estate (3)", "DP03_0040PE"],
["Management + Science (4)" , "DP03_0041PE"],
["Education and Health (5)" , "DP03_0042PE"],
["Arts + Entertainment (6)" , "DP03_0043PE"],
["Other services" , "DP03_0044PE"],
["Public administration" , "DP03_0045PE"]
],
"occupation" : [
["Management (1)" , "DP03_0027PE"],
["Service" , "DP03_0028PE"],
["Sales and Office" , "DP03_0029PE"],
["Construction (2)" , "DP03_0030PE"],
["Production (3)" , "DP03_0031PE"]
],
"rent" : [
["<$500" , "DP04_0127PE"],
["$500-999" , "DP04_0128PE"],
["$1K-1,499" , "DP04_0129PE"],
["$1,500 - 2K" , "DP04_0130PE"],
["$2K - 2,499" , "DP04_0131PE"],
["$2,500 - 3K" , "DP04_0132PE"],
["$3K+" , "DP04_0133PE"]
],
"rent_per_income" : [
["<15%" , "DP04_0137PE"],
["15-19.9%" , "DP04_0138PE"],
["20-24.9%" , "DP04_0139PE"],
["25-29.9%" , "DP04_0140PE"],
["30-34.9%" , "DP04_0141PE"],
["35%+" , "DP04_0142PE"]
],
"school_enrollment" : [
["Preschool" , "DP02_0053PE"],
["Kindergargten" , "DP02_0054PE"],
["Grades 1-8" , "DP02_0055PE"],
["High School" , "DP02_0056PE"],
["College or Grad" , "DP02_0057PE"]
]
}
# Function to iterate through all the charts
def getCensusData(chart_name, state, district):
labels, district_data, state_data, fed_data = getChartData(queries[chart_name], state, district)
resp = {
"labels" : labels,
"district_data" : district_data,
"state_data" : state_data,
"fed_data" : fed_data
}
return resp
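# Example call (added; shown as a comment because it issues live Census API
# requests, and the FIPS codes are hypothetical):
#   getCensusData("age", "06", "12")
# returns a dict with "labels", "district_data", "state_data" and "fed_data" keys.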
# Function to pull the labels/data for a particular chart.
# Should be sent a dictionary of labels and the associated variable names.
# These dictionaries should all the be in the charts variable above.
def getChartData(query, state, district):
labels = []
variables = ""
for item in query:
labels.append(item[0])
if (variables != ""):
variables += ","
variables += item[1]
conn = httplib2.Http(disable_ssl_certificate_validation=True)
resp, content = conn.request(getURL(variables, state, district))
district_data = json.loads(content)[1][:len(labels)]
resp, content = conn.request(getURL(variables, state))
state_data = json.loads(content)[1][:len(labels)]
resp, content = conn.request(getURL(variables))
fed_data = json.loads(content)[1][:len(labels)]
return labels, district_data, state_data, fed_data
# Simple function that builds our Census API URLs
def getURL(variables, state=None, district=None):
if (state and district): for_block = "congressional+district:%s&in=state:%s" % (district, state)
elif (state) : for_block = "state:%s" % state
else : for_block = "us:*"
return "http://api.census.gov/data/2015/acs/acs1/profile?get=%s&for=%s&key=%s" % (variables, for_block, CENSUS_API_KEY)
``` |
{
"source": "jkenlooper/cookiecutter-chill",
"score": 3
} |
#### File: src/api/app.py
```python
import os
import sys
from flask import Flask
class API(Flask):
"API App"
def make_app(config=None, **kw):
app = API('api')
if config:
config_file = config if config[0] == os.sep else os.path.join(os.getcwd(), config)
app.config.from_pyfile(config_file)
app.config.update(kw)
# Import the views
from llama import LlamaView
# Register the views
app.add_url_rule('/llama/', view_func=LlamaView.as_view('llama'))
return app
def main():
from gevent import pywsgi
config_file = sys.argv[1]
app = make_app(config=config_file)
app.debug = app.config.get('DEBUG')
if app.debug:
app.run(
host='0.0.0.0',
port=5858,
use_reloader=True,
)
else:
server = pywsgi.WSGIServer(('0.0.0.0', 5858), app)
server.serve_forever(stop_timeout=10)
if __name__ == "__main__":
main()
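# Typical invocation (added as a note; "site.cfg" is a hypothetical config
# filename): `python app.py site.cfg` serves the API with gevent on port 5858,
# or with the Flask reloader when DEBUG is set in the config.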
``` |
{
"source": "jkenlooper/llama3-weboftomorrow-com",
"score": 2
} |
#### File: {{ cookiecutter.project_slug }}/bin/site-cfg.py
```python
import sys
import os.path
from api.tools import loadConfig
def main():
"""
Prints out the value for a config name in the site.cfg file.
"""
config_file = sys.argv[1]
name = sys.argv[2]
config = loadConfig(config_file)
value = config[name]
print(value)
if __name__ == "__main__":
main()
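# Usage note (added; "SECRET_KEY" is just an example entry name):
#   python site-cfg.py site.cfg SECRET_KEY
# prints the value of that entry from the site.cfg file.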
``` |
{
"source": "jkennedy-usgs/sgp-gsadjust",
"score": 2
} |
#### File: gsadjust/data/analysis.py
```python
import datetime as dt
import logging
import numpy as np
from .adjustment import AdjustedStation
class InversionError(Exception):
pass
def _equal_or_truncated(a, b):
"""For matching Gravnet output (Gravnet truncates to 6 characters)"""
return a == b or a[:6] == b
def numpy_inversion(adjustment):
"""
Pre- and post-processing data for network adjustment.
The actual adjustment is carried out in adjustment.python_lsq_inversion()
Parameters
----------
adjustment : Adjustment object
Stores information needed for the
"""
adjustment.adjustmentresults.text = []
# sta_dic_LS is a dictionary, key: station name, value: column for A matrix
sta_dic_ls = adjustment.sta_dic_ls
loop_ls_dict = adjustment.loop_ls_dict
# get number of observations:
n_rel_obs = len(adjustment.deltas)
n_abs_obs = len(adjustment.datums)
n_meters = adjustment.n_meters
ndrift = adjustment.ndrift
# dict of tuples, used to identify column of drift observation in A matrix:
# (loop.name, (column relative to end of A matrix, drift degree)
netadj_loop_keys = adjustment.netadj_loop_keys
# Initialize least squares matrices
# number of unknowns
nb_x = len(sta_dic_ls) + ndrift + n_meters
adjustment.adjustmentresults.n_unknowns = nb_x
# model matrix:
A = np.zeros((n_rel_obs + n_abs_obs, nb_x))
# weight matrix:
P = np.zeros((n_rel_obs + n_abs_obs, n_rel_obs + n_abs_obs)) # pas sur
# observation matrix:
Obs = np.zeros((n_rel_obs + n_abs_obs, 1))
# datum-free constraint vector:
S = np.zeros((nb_x, 1))
row = 0
delta_keys = []
# Populate least squares matrices
for delta in adjustment.deltas:
delta_keys.append(delta.__hash__())
dg = delta.dg if delta.type != "assigned" else delta.assigned_dg
Obs[row] = dg * delta.cal_coeff
P[row, row] = 1.0 / (delta.adj_sd ** 2)
A[row, sta_dic_ls[delta.sta1]] = -1
A[row, sta_dic_ls[delta.sta2]] = 1
# Populate 1 column per gravimeter for calibration coefficient
if adjustment.adjustmentoptions.cal_coeff:
meter = delta.meter
A[row, adjustment.meter_dic[meter] + len(sta_dic_ls)] = delta.dg
# Populate column(s) for drift, if included in network adjustment
if delta.ls_drift is not None:
loop_name = delta.ls_drift[0]
# It's possible for ls_drift to have been set, but the loop method to be
# something other than netadj
if loop_name:
if loop_ls_dict[loop_name] == "netadj":
for i in range(delta.ls_drift[1]): # degree of polynomial
A[
row,
len(sta_dic_ls)
+ n_meters
+ netadj_loop_keys[loop_name][0]
+ i,
] = (delta.sta2_t - delta.sta1_t) ** (i + 1)
S[sta_dic_ls[delta.sta1]] = 1
S[sta_dic_ls[delta.sta2]] = 1
row += 1
# add datum observation (absolute station(s) or station(s) with fixed values)
# Key errors handled by calling routine
i = 0
for datum in adjustment.datums:
A[n_rel_obs + i, sta_dic_ls[datum.station]] = 1
P[n_rel_obs + i, n_rel_obs + i] = 1.0 / datum.sd ** 2
Obs[n_rel_obs + i] = datum.g
i += 1
# Do the inversion
adjustment.A = A
adjustment.P = P
adjustment.Obs = Obs
adjustment.S = S
adjustment.dof = n_rel_obs + n_abs_obs - nb_x
adjustment.g_dic = dict()
# zero-division errors are caught by caller
adjustment.python_lsq_inversion()
# Populate results
results, sd_all = [], []
for i in range(len(sta_dic_ls)):
for key, val in sta_dic_ls.items():
if val == i:
try:
g = float(adjustment.X[i])
sd = float(np.sqrt(adjustment.var[i]))
t = AdjustedStation(key, g, sd)
results.append(t)
adjustment.g_dic[key] = g
adjustment.sd_dic[key] = sd
sd_all.append(sd)
except Exception:
raise InversionError("Bad variance in results.")
return
adjustment.adjustmentresults.avg_stdev = np.mean(sd_all)
# Retrieve calibration coefficient(s)
cal_dic = dict()
if adjustment.adjustmentoptions.cal_coeff:
for k, v in adjustment.meter_dic.items():
cal_dic[k] = (
float(1 - adjustment.X[len(sta_dic_ls) + v]),
float(np.sqrt(adjustment.var[len(sta_dic_ls) + v])),
)
else:
for k, v in adjustment.meter_dic.items():
cal_dic[k] = (1.0, 0.0)
adjustment.adjustmentresults.cal_dic = cal_dic
# calculate and display statistics:
adjustment.lsq_statistics()
return results
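# Illustrative sketch (added; this is not the GSadjust implementation): the
# weighted least-squares solve that python_lsq_inversion() encapsulates is
# assumed here to follow the standard normal equations X = (A'PA)^-1 A'P Obs.
def _weighted_lsq_sketch(A, P, Obs):
    """Return parameter estimates and variances for toy A, P, Obs arrays."""
    N = A.T @ P @ A                          # normal matrix
    X = np.linalg.solve(N, A.T @ P @ Obs)    # adjusted parameters
    r = Obs - A @ X                          # residuals
    dof = A.shape[0] - A.shape[1]
    s0_sq = float(r.T @ P @ r) / dof if dof > 0 else 1.0  # variance of unit weight
    var = s0_sq * np.diag(np.linalg.inv(N))               # parameter variances
    return X, var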
def compute_gravity_change(obstreemodel, table_type="simple"):
"""Calculates gravity change between surveys.
Parameters
----------
obstreemodel : ObsTreeModel
Tree structure with the data.
table_type : {"simple", "full", "list"}
Controls what is shown in the table.
Simple: One column per time interval. Dg calculated between each interval, and
between first and each subsequent interval.
Full: Shows g and sd at each station, and gravity changes converted to meters
of water.
List: Shows station, date, g, sd for each adjusted station.
Returns
-------
tuple
header, table, dates
"""
# Check that all values are positive (it should work either way, but it avoids confusion)
# for i in range(obstreemodel.invisibleRootItem().rowCount()):
# survey = obstreemodel.invisibleRootItem().child(i)
# for ii in range(survey.results_model.rowCount()):
# adj_station = survey.results_model.data(survey.results_model.index(ii, 0),
# role=256) # 256=Qt.UserRole
# if adj_station.g < 0:
# return False
compare_station, initial_station, iteration_station, iteration_name = (
None,
None,
None,
None,
)
logging.info("Calculating gravity change")
first = True
unique_station_names = set()
unique_stations = list()
for survey in obstreemodel.checked_surveys():
for ii in range(survey.rowCount()):
loop = survey.child(ii)
for iii in range(loop.rowCount()):
station = loop.child(iii)
unique_station_names.add(station.station_name)
unique_stations.append(station)
unique_station_names = sorted(unique_station_names)
out_table_iteration, out_table_cumulative = [], []
header1, header2 = [], []
lat, lon, elev, all_g = [], [], [], []
dates, header = [], []
if table_type == "list":
date_col, station_col, sd_col = [], [], []
for survey in obstreemodel.checked_surveys():
dates.append(dt.datetime.strptime(survey.name, "%Y-%m-%d"))
header = ["Station", "Date", "g", "Std. dev."]
for adj_station in survey.results:
station_col.append(adj_station.station)
date_col.append(survey.name)
all_g.append(adj_station.g)
sd_col.append(adj_station.sd)
table = [station_col, date_col, all_g, sd_col]
return header, table, dates
if table_type == "full":
for station in unique_station_names:
station_g = []
g_header = []
# station_coords can not exist during testing, maybe other times also?
try:
lonc, latc, elevc = obstreemodel.station_coords[station]
lon.append(f"{lonc:.5f}")
lat.append(f"{latc:.5f}")
elev.append(f"{elevc:.5f}")
except TypeError:
lat.append(-999)
lon.append(-999)
elev.append(-999)
except KeyError:
lat.append(-999)
lon.append(-999)
elev.append(-999)
for survey in obstreemodel.checked_surveys():
g_header.append(survey.name + "_g")
g_header.append(survey.name + "_sd")
for adj_station in survey.results:
if adj_station.station[:6] == station[:6]:
station_g.append("{:0.1f}".format(adj_station.g))
station_g.append("{:0.1f}".format(adj_station.sd))
break
else:
station_g.append("-999")
station_g.append("-999")
all_g.append(station_g)
for survey in obstreemodel.checked_surveys():
dates.append(dt.datetime.strptime(survey.name, "%Y-%m-%d"))
diff_cumulative = []
diff_iteration = []
if table_type == "full":
diff_cumulative_sd, diff_iteration_sd = [], []
if first:
# Calculate both the between-survey change and the change from the initial survey
initial_survey = survey.results
iteration_reference = initial_survey
reference_name = survey.name
iteration_name = reference_name
first = False
else:
if table_type == "simple":
header1.append(f"{iteration_name}_to_{survey.name}")
header2.append(f"{reference_name}_to_{survey.name}")
elif table_type == "full":
header1.append(f"dH2O_{iteration_name}_to_{survey.name}")
header1.append(f"dH2O_sd_{iteration_name}_to_{survey.name}")
header2.append(f"dH2O_{reference_name}_to_{survey.name}")
header2.append(f"dH2O_sd_{reference_name}_to_{survey.name}")
compare_survey = survey.results
for station_name in unique_station_names:
for initial_station in initial_survey:
# Iterate through, look for matching station. 'if' statements deal
# with Gravnet, which truncates station names to 6 characters
if _equal_or_truncated(initial_station.station, station_name):
break
else:
# If we get to the end without breaking, set it to None.
initial_station = None
for iteration_station in iteration_reference:
if _equal_or_truncated(iteration_station.station, station_name):
break
else:
# If we get to the end without breaking, set it to None.
iteration_station = None
for compare_station in compare_survey:
if _equal_or_truncated(compare_station.station, station_name):
break
else:
# If we get to the end without breaking, set it to None.
compare_station = None
if initial_station is not None and compare_station is not None:
if table_type == "simple":
diff_cumulative.append(
"{:0.1f}".format(compare_station.g - initial_station.g)
)
elif table_type == "full":
diff_cumulative.append(
"{:0.2f}".format(
(compare_station.g - initial_station.g) / 41.9
)
)
var = (
np.sqrt(compare_station.sd ** 2 + initial_station.sd ** 2)
/ 41.9
)
if np.isnan(var):
diff_cumulative_sd.append("-999")
else:
diff_cumulative_sd.append("{:0.2f}".format(var))
else:
diff_cumulative.append("-999")
if table_type == "full":
diff_cumulative_sd.append("-999") # for sd column
if iteration_station is not None and compare_station is not None:
if table_type == "simple":
diff_iteration.append(
"{:0.1f}".format(compare_station.g - iteration_station.g)
)
elif table_type == "full":
diff_iteration.append(
"{:0.2f}".format(
(compare_station.g - iteration_station.g) / 41.9
)
)
var = (
np.sqrt(compare_station.sd ** 2 + iteration_station.sd ** 2)
/ 41.9
)
if np.isnan(var):
diff_iteration_sd.append("-999")
else:
diff_iteration_sd.append("{:0.2f}".format(var))
else:
diff_iteration.append("-999")
if table_type == "full":
diff_iteration_sd.append("-999") # for sd column
out_table_iteration.append(diff_iteration)
out_table_cumulative.append(diff_cumulative)
if table_type == "full":
out_table_iteration.append(diff_iteration_sd)
out_table_cumulative.append(diff_cumulative_sd)
iteration_reference = compare_survey
iteration_name = survey.name
out_table = (
[list(unique_station_names)] + out_table_iteration + out_table_cumulative
)
if table_type == "simple":
# deal with 2-survey case
if header1 == header2:
header = ["station"] + header1
table = out_table[:-1]
else:
header = ["station"] + header1 + header2
table = out_table
return header, table, dates
elif table_type == "full":
# transpose table
g = [list(i) for i in zip(*all_g)]
table = [unique_station_names, lat, lon, elev]
table += g
# deal with 2-survey case
if header1 == header2:
header = (
["Station", "Latitude", "Longitude", "Elevation"] + g_header + header1
)
table += out_table_iteration
else:
header = (
["Station", "Latitude", "Longitude", "Elevation"]
+ g_header
+ header1
+ header2
)
table += out_table_iteration
table += out_table_cumulative
# transpose back
table = [list(i) for i in zip(*table)]
return header, table, dates
```
#### File: gsadjust/drift/continuous.py
```python
import logging
import numpy as np
from scipy.interpolate import UnivariateSpline
from ..data import DeltaNormal
N_PTS_IN_INTERPOLATION = 300
N_PTS_IN_EXTRAPOLATION = 200
def drift_continuous(
data,
plot_data,
drift_x,
drift_rate,
method_key,
tension_slider_value,
extrapolation_type,
weight_obs,
min_time,
max_time,
loop_name,
):
"""Interpolate drift model: polynomial, spline, etc. at N_PTS_IN_INTERPOLATION
points, plus N_PTS_IN_EXTRAPOLATION on either side.
These values need to be relatively small for decent performance.
Parameters
----------
data : list
list of ObsTreeStations
plot_data : list
One entry per station. Each entry is a list, in the order:
[[plot time values],
[plot g values],
station name,
[g standard deviation],
[time standard deviation]]
drift_x : list
Drift time observations (x location of points on bottom plot)
drift_rate : list
Drift rate observations (y location of points on bottom plot)
method_key : {0, 1, 2, 3, 4}
Indicates type of drift correction.
0: Constant
1: Spline
2-4: Polynomial (degree is one less than the value)
tension_slider_value : int
Controls tension on the interpolated spline
extrapolation_type : {1, anything else}
Controls how interpolation is extended from the outermost data.
1: Constant
not 1: linearly extend the fitted curve at the same slope as the first/last
2 data points
weight_obs : int
Controls if observations are weighted when fitting a constant drift rate.
Only used if drift is set to constant, not for other methods.
0: no weighting
not 0: weighted
min_time : float
Time to extrapolate at the beginning. Should be the time of the first station
occupation of the loop.
max_time : float
Time to extrapolate at the end. Should be the time of the last station
occupation of the loop.
loop_name : str
loop name, for creating deltas
Returns
-------
delta_list : list
List of deltas
xp : ndarray
For plotting the bottom plot
yp : ndarray
For plotting the bottom plot
z_main : (mean_drift, sigma)
These are displayed on the plot.
"""
xp = np.linspace(min(drift_x), max(drift_x), N_PTS_IN_INTERPOLATION) # constant
drift_stats = None
z_main = []
if method_key == 0: # constant drift correction
if weight_obs == 0:
mean_drift = sum(drift_rate) / len(drift_rate)
sigma = np.std(drift_rate) / np.sqrt(len(drift_rate))
yp = np.zeros(xp.size) + mean_drift
z_main = [(mean_drift, sigma)]
# Weight observations according to NGA method
else:
drifts, drift_w = [], []
for station_data in plot_data:
t, R, Rsd, tsd = (
station_data[0],
station_data[1],
station_data[3],
station_data[4],
)
if len(t) > 1:
for i in range(1, len(t)):
dr = R[i] - R[0]
dt = (t[i] - t[0]) * 24
sdr = np.sqrt(Rsd[i] ** 2 + Rsd[0] ** 2)
sdt = np.sqrt(tsd[i] ** 2 + tsd[0] ** 2)
drifts.append(dr / dt)
drift_sd = np.sqrt(
sdr ** 2 / dt ** 2 + dr ** 2 * sdt ** 2 / dt ** 4
)
drift_w.append(1 / drift_sd ** 2)
num = []
for idx, w in enumerate(drift_w):
num.append(w * drifts[idx])
mean_drift = np.sum(num) / np.sum(drift_w)
num = []
for idx, w in enumerate(drift_w):
num.append(w * (drifts[idx] - mean_drift) ** 2)
sigma_d = np.sqrt(np.sum(num) / ((len(drift_w) - 1) * np.sum(drift_w)))
drift_stats = dict()
drift_stats["t0"] = plot_data[0][0][0]
drift_stats["sigma_d"] = sigma_d
drift_stats["mean_drift"] = mean_drift
yp = np.zeros(xp.size) + mean_drift
z_main = [(mean_drift, sigma_d)]
else:
x0 = [f - np.min(drift_x) for f in drift_x]
xp0 = [f - np.min(xp) for f in xp]
idx = sorted(range(len(x0)), key=lambda xpt: x0[xpt])
x_sorted, drift_rate_sorted = [], []
for i in idx:
x_sorted.append(x0[i])
drift_rate_sorted.append(drift_rate[i])
x0 = x_sorted
drift_rate = drift_rate_sorted
if method_key == 9:
pass
if method_key == 1: # spline
try:
s = UnivariateSpline(x0, drift_rate, k=3, s=tension_slider_value)
xs = np.linspace(x0[0], x0[-1], N_PTS_IN_INTERPOLATION)
yp = s(xs)
logging.info(
"Spline drift correction, tension={}".format(tension_slider_value)
)
except Exception:
raise IndexError
else:
# Polynomial interpolation. Degree is one less than the method key, e.g.,
# method_key == 2 is 1st order polynomial, etc.
try:
z_main = np.polyfit(x0, drift_rate, method_key - 1)
p = np.poly1d(z_main)
yp = p(xp0)
logging.info(
"Polynomial drift correction degree {}".format(method_key - 1)
)
except np.linalg.LinAlgError as e:
return np.linalg.LinAlgError
# Method for extrapolating beyond fitted drift curve extent
if extrapolation_type == 1: # constant
new_xp = np.linspace(min_time, min(drift_x), N_PTS_IN_EXTRAPOLATION)
new_xp = np.append(new_xp, xp)
new_xp = np.append(new_xp, np.linspace(max(drift_x), max_time, 200))
xp = new_xp
new_yp = np.ones(200) * yp[0]
new_yp = np.append(new_yp, yp)
new_yp = np.append(new_yp, np.ones(200) * yp[-1])
yp = new_yp
else: # linear extrapolation from first two (and last two) points
# get first two points
x = xp[:2]
y = yp[:2]
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
new_xp1 = np.linspace(min_time, min(drift_x), 200)
yp1 = p(new_xp1)
x = xp[-2:]
y = yp[-2:]
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
new_xp2 = np.linspace(max(drift_x), max_time, 200)
yp2 = p(new_xp2)
xp_temp = np.append(new_xp1, xp)
xp = np.append(xp_temp, new_xp2)
yp_temp = np.append(yp1, yp)
yp = np.append(yp_temp, yp2)
delta_list = calc_cont_dg(xp, yp, data, loop_name, drift_stats)
return delta_list, xp, yp, z_main
def calc_cont_dg(xp, yp, data, loop_name, drift_stats):
"""
Calculates delta-g's while removing drift using the input drift model
Parameters
----------
xp : ndarray
times of continuous drift model
yp : ndarray
continuous drift model
data : list
list of ObsTreeStations
loop_name : str
drift_stats : dict or None
If dict, observations are weighted; None if not
Returns
-------
list
list of deltas
"""
first = True
ypsum = [0]
delta_list = []
for x, drift_rate in zip(xp, yp):
if first:
first = False
prev_x = x
else:
prev_sum = ypsum[-1]
interval = (x - prev_x) * 24
prev_x = x
ypsum.append(prev_sum + drift_rate * interval)
xp = xp.tolist()
yp = ypsum # yp = yp.tolist()
prev_station = data.pop(0)
for station in data:
# If using weighted dg
if drift_stats:
station.assigned_sd = np.sqrt(
station.original_sd ** 2
+ ((station.tmean - drift_stats["t0"]) * 24) ** 2
* drift_stats["sigma_d"] ** 2
+ np.sqrt(station.t_stdev ** 2 + data[0].t_stdev ** 2)
* drift_stats["mean_drift"] ** 2
)
else:
station.assigned_sd = None
drift1_idx = min(
range(len(xp)), key=lambda i: abs(xp[i] - prev_station.tmean)
)
drift1 = yp[drift1_idx]
drift2_idx = min(range(len(xp)), key=lambda i: abs(xp[i] - station.tmean))
drift2 = yp[drift2_idx]
delta = DeltaNormal(
prev_station, station, driftcorr=drift2 - drift1, loop=loop_name
)
delta_list.append(delta)
prev_station = station
return delta_list
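# Minimal sketch (added; the drift observations below are made up): the
# polynomial branch in drift_continuous reduces to a plain numpy polyfit of
# drift rate against elapsed time, where method_key - 1 is the degree.
if __name__ == "__main__":
    example_x = np.array([0.0, 0.1, 0.2, 0.3])     # days since first reading
    example_rate = np.array([2.0, 2.4, 2.9, 3.5])  # drift rate observations
    method_key = 2                                  # degree-1 (linear) model
    coeffs = np.polyfit(example_x, example_rate, method_key - 1)
    print(np.poly1d(coeffs))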
```
#### File: gsadjust/file/write.py
```python
import csv
import logging
import os
import time
from ..data.analysis import compute_gravity_change
SEPARATOR = ' | '
def line_generator(lines):
for line in lines:
yield line + "\n"
def export_metadata(obsTreeModel, data_path):
"""
Write metadata text to file. Useful for USGS data releases.
The filename is generated automatically with a timestamp.
Parameters
----------
obsTreeModel : MainProg data object
data_path : str
Path to write output
Returns
-------
filename or False
"""
filename = os.path.join(
data_path, "GSadjust_MetadataText_" + time.strftime("%Y%m%d-%H%M") + ".txt"
)
output_format = 'table'
export_fn = {
'table': _export_metadata_table,
'text': _export_metadata_text,
}
fn = export_fn.get(output_format)
lines = fn(obsTreeModel)
if not lines:
return False
with open(filename, 'w') as fid:
fid.write(
"Attribute accuracy is evaluated from the least-squares network adjustment"
" results. "
)
fid.writelines(lines)
return filename
def _export_metadata_table(obsTreeModel):
table = [
SEPARATOR.join([
"Survey", "Max. delta residual", "Max. datum residual", "Mean SD", "Deltas", "Deltas not used",
"Datums", "Datums not used"])
]
sf_header_written = False
for survey in obsTreeModel.checked_surveys():
if survey.adjustment.adjustmentresults.n_datums > 0:
table.append(
f"{survey.name} "
f"{survey.adjustment.adjustmentresults.max_dg_residual:>5.1f} "
f"{survey.adjustment.adjustmentresults.max_datum_residual:>5.1f} "
f"{survey.adjustment.adjustmentresults.avg_stdev:>5.1f} "
f"{survey.adjustment.adjustmentresults.n_deltas:>4} "
f"{survey.adjustment.adjustmentresults.n_deltas_notused:>3} "
f"{survey.adjustment.adjustmentresults.n_datums:>3} "
f"{survey.adjustment.adjustmentresults.n_datums_notused:>3}"
)
for survey in obsTreeModel.checked_surveys():
if survey.adjustment.adjustmentresults.n_datums > 0:
if len(survey.adjustment.adjustmentresults.cal_dic) > 0:
if not sf_header_written:
table.append("Relative gravimeter scale factor(s)")
table.append(
"Survey | Meter | Scale factor | Scale factor S.D. (0 = specified S.F.)"
)
sf_header_written = True
for k, v in survey.adjustment.adjustmentresults.cal_dic.items():
table.append(
"{} {:>6} {:>10.6f} {:>10.6f}".format(
survey.name, k, v[0], v[1]
)
)
elif survey.adjustment.adjustmentoptions.specify_cal_coeff:
if not sf_header_written:
table.append("Relative gravimeter scale factor(s)")
table.append(
"Survey | Meter | Scale factor | Scale factor S.D."
)
sf_header_written = True
for (
k,
v,
) in survey.adjustment.adjustmentoptions.meter_cal_dict.items():
table.append(
"{} {:>6} {:>10.6f} 0".format(survey.name, k, v)
)
return line_generator(table) if table else False
def _export_metadata_text(obsTreeModel):
lines = []
for survey in obsTreeModel.checked_surveys():
if (
survey.adjustment.adjustmentresults.n_datums > 0
): # check that there are results
lines.append(
f'For the {survey.name} survey, the minimum and maximum gravity-difference '
f'residuals were {survey.adjustment.adjustmentresults.min_dg_residual:0.1f} '
f'and {survey.adjustment.adjustmentresults.max_dg_residual:0.1f} '
f'microGal, respectively. The minimum and maximum datum (absolute-gravity station) residuals '
f'were {survey.adjustment.adjustmentresults.min_datum_residual:0.1f} and '
f'{survey.adjustment.adjustmentresults.max_datum_residual:0.1f} microGal, respectively. '
f'The average standard deviation of the adjusted gravity values at each station '
f'(derived from the network adjustment) was {survey.adjustment.adjustmentresults.avg_stdev:0.1f} microGal. '
)
# TODO: account for instance of 1 outlier ('1 was removed')
datum_was_or_were, delta_was_or_were = 'were', 'were'
outlier_or_outliers = 'outliers'
if survey.adjustment.adjustmentresults.n_datums == 1:
datum_was_or_were = 'was'
if survey.adjustment.adjustmentresults.n_deltas_notused == 1:
delta_was_or_were = 'was'
outlier_or_outliers = 'an outlier'
lines.append(
'{} out of {} possible gravity differences were used in the adjustment ({} {} removed '.format(
survey.adjustment.adjustmentresults.n_deltas,
survey.adjustment.adjustmentresults.n_deltas_notused
+ survey.adjustment.adjustmentresults.n_deltas,
survey.adjustment.adjustmentresults.n_deltas_notused,
delta_was_or_were,
)
)
lines.append(
'as {}). {} out of {} possible datum observations {} used. '.format(
outlier_or_outliers,
survey.adjustment.adjustmentresults.n_datums,
survey.adjustment.adjustmentresults.n_datums_notused
+ survey.adjustment.adjustmentresults.n_datums,
datum_was_or_were,
)
)
logging.info('Metadata text written to file')
return line_generator(lines) if lines else False
def export_summary(obsTreeModel, data_path):
"""
Write a summary of processing to a text file. Can be used to reproduce results.
The filename is generated automatically with a timestamp.
Parameters
----------
obsTreeModel : MainProg data object
data_path : str
Path to write output
Returns
-------
filename or False
"""
fn = os.path.join(
data_path, "GSadjust_Summary_" + time.strftime("%Y%m%d-%H%M") + ".txt"
)
# Write header info
with open(fn, "w") as fid:
fid.write(
"# GSadjust processing summary, {}\n".format(
time.strftime("%Y-%m-%d %H:%M")
)
)
fid.write("# Station data\n")
for survey in obsTreeModel.surveys():
for loop in survey.loops():
fid.write(
"# Survey {}, Loop {}, Source file: {}\n".format(
survey.name, loop.name, loop.source
)
)
for iii in range(loop.rowCount()):
station = loop.child(iii)
fid.write("# " + station.summary_str)
fid.write(
"# Checked | Station | Raw gravity | ET correction | Corr."
" gravity | Std. dev.\n"
)
for sample_str in station.iter_samples():
fid.write(sample_str)
fid.write("# Loop data\n")
for survey in obsTreeModel.surveys():
for ii in range(survey.rowCount()):
loop = survey.child(ii)
fid.write(
"# Survey {}, Loop {}, Source file: {}\n# ".format(
survey.name, loop.name, loop.source
)
)
fid.write(str(loop))
fid.write(
"# Checked | Station1 | Station2 | Date | Time (UTC) | delta-g |"
" Std. dev. | Drift correction\n"
)
for delta in loop.deltas:
fid.write(
"{} {} {} {} {:.2f} {:.2f} {}\n".format(
int(delta.checked / 2),
delta.sta1,
delta.sta2,
delta.time_string(),
delta.dg,
delta.sd,
delta.driftcorr,
)
)
fid.write("# Adjustment data\n")
for survey in obsTreeModel.surveys():
if survey.checkState() == 0:
fid.write(
"Survey {} not included in results (survey was unchecked)\n".format(
survey.name
)
)
else:
fid.write("# Adjustment options, survey: {}\n".format(survey.name))
fid.write(str(survey.adjustment.adjustmentoptions))
fid.write(
"# Deltas in the adjustment, survey: {}\n".format(survey.name)
)
fid.write(
"# Checked | Station1 | Station2 | Date | Time (UTC) | delta-g |"
" Std. dev. | Std. dev. for adj. | Residual\n"
)
for delta in survey.adjustment.deltas:
fid.write("{}\n".format(delta))
fid.write(
"# Datums in the adjustment, survey: {}\n".format(survey.name)
)
fid.write(
"# Checked | Station | Date | Gravity | Std. dev. | Residual\n"
)
for datum in survey.adjustment.datums:
fid.write("{}\n".format(datum))
fid.write("# Adjustment results, survey: {}\n".format(survey.name))
lines = line_generator(survey.adjustment.results_string())
fid.writelines(lines)
fid.write("# Adjusted station values, survey: {}\n".format(survey.name))
fid.write("# Station | Gravity | Std. dev.\n")
for adj_sta in survey.results:
fid.write("{}\n".format(str(adj_sta)))
return fn
def export_data(obstreemodel, data_path):
"""
Export gravity change table to csv file
The filename is generated automatically with a timestamp.
Parameters
----------
obstreemodel : MainProg data object
data_path : str
Path to write output
Returns
-------
filename or False
"""
fn = os.path.join(
data_path, "GSadjust_TabularData_" + time.strftime("%Y%m%d-%H%M") + ".csv"
)
table = compute_gravity_change(obstreemodel, table_type="full")
with open(fn, "w", newline="\n") as fid:
wr = csv.writer(fid)
wr.writerow(table[0])
for row in table[1]:
wr.writerow(row)
return fn
```
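export_data above writes whatever compute_gravity_change returns, with the header row in element 0 and the data rows in element 1. A minimal sketch of the same csv-writing pattern; the station names and values are hypothetical placeholders:
```python
import csv

# Hypothetical table in the shape export_data() expects:
# element 0 is the header row, element 1 is a list of data rows.
table = (
    ["Station", "2019-01-01_to_2019-06-01"],
    [["STA1", -12.3], ["STA2", 4.1]],
)
with open("gravity_change_example.csv", "w", newline="\n") as fid:
    wr = csv.writer(fid)
    wr.writerow(table[0])
    for row in table[1]:
        wr.writerow(row)
```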
#### File: gui/tabs/drift.py
```python
import datetime as dt
import logging
import numpy as np
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.dates import DateFormatter, date2num
from matplotlib.figure import Figure
from ..messages import MessageBox
from ..widgets import IncrMinuteTimeEdit
from ...data import DeltaList, DeltaNormal
from ...drift import drift_continuous, drift_roman
from ...models import (
DeltaTableModel,
SamplesTableModel,
TareTableModel,
)
from ...obstree import ObsTreeLoop
###########################################################################
# GSadjust drift tab
###########################################################################
class TabDrift(QtWidgets.QWidget):
station_label = None
def __init__(self, parent):
super(TabDrift, self).__init__()
self.parent = parent
self.dpi = 100
self.popup_menu = QtWidgets.QMenu(None)
# Main window
layout_main = QtWidgets.QVBoxLayout()
main_vsplitter_window = QtWidgets.QSplitter(Qt.Vertical, self)
# Setup drift figures (Roman and Continuous). Only one will be shown at a time.
# Drift figure: default and roman
self.drift_window = QtWidgets.QSplitter(Qt.Horizontal, self)
self.drift_fig = Figure((3.0, 5.0), dpi=self.dpi, facecolor="white")
self.drift_single_canvas = FigureCanvas(self.drift_fig)
self.drift_fig.subplots_adjust(wspace=0.3)
self.axes_drift_single = self.drift_fig.add_subplot(111)
# Drift figure: continuous
# Plot panel
self.drift_cont_plotpanel = QtWidgets.QSplitter(Qt.Vertical, self)
# Top plot - lines
self.drift_cont_figtop = Figure((3.0, 5.0), dpi=self.dpi, facecolor="white")
self.drift_cont_canvastop = FigureCanvas(self.drift_cont_figtop)
self.drift_cont_figtop.subplots_adjust(wspace=0.3)
self.axes_drift_cont_upper = self.drift_cont_figtop.add_subplot(111)
# Bottom plot - drift curves
self.drift_cont_figbot = Figure((3.0, 5.0), dpi=self.dpi, facecolor="white")
self.drift_cont_canvasbot = FigureCanvas(self.drift_cont_figbot)
self.drift_cont_figbot.subplots_adjust(wspace=0.3)
self.axes_drift_cont_lower = self.drift_cont_figbot.add_subplot(111)
# Drift tab tables
self.dg_avg_table = DeltaTableModel()
self.delta_view = QtWidgets.QTableView()
self.cont_label_widget = QtWidgets.QWidget()
#######################################################################
# Widgets for right-hand display of drift controls/options
#######################################################################
# Drift method widget
self.driftmethod_combobox_key = {
0: "None",
1: "Network adjustment",
2: "Roman (interpolate)",
3: "Continuous model",
}
self.driftmethod_combobox = QtWidgets.QComboBox()
self.driftmethod_combobox.activated.connect(self.set_drift_method)
for item in self.driftmethod_combobox_key.values():
self.driftmethod_combobox.addItem(item)
# Widget to remove dg-observations with a long elapsed time in between
self.drift_screen_elapsed_time = CustomCheckBox(
"Max. time between repeats (hh:mm)"
)
self.drift_screen_elapsed_time.setChecked(False)
self.drift_screen_elapsed_time.clicked.connect(self.time_extent_changed)
self.drift_time_spinner = IncrMinuteTimeEdit(QtCore.QTime(1, 0))
self.drift_time_spinner.timeChanged.connect(self.time_extent_changed)
self.drift_time_spinner.setDisplayFormat("hh:mm")
# Widget to add horizontal-extent lines to drift-rate plot
self.drift_plot_hz_extent = QtWidgets.QCheckBox(
"Show time-extent of drift observation"
)
self.drift_plot_hz_extent.setChecked(False)
self.drift_plot_hz_extent.stateChanged.connect(self.plot_drift)
self.drift_plot_weighted = CustomCheckBox("Weight drift observations")
self.drift_plot_weighted.setChecked(False)
self.drift_plot_weighted.clicked.connect(self.update_weighted)
self.tension_slider = QtWidgets.QSlider(Qt.Horizontal)
self.tension_slider.setRange(10, 2500)
self.tension_slider.setValue(1250)
self.tension_slider.setEnabled(False)
self.tension_slider.valueChanged.connect(self.update_tension)
self.tension_label = QtWidgets.QLabel()
self.tension_label.setText("{:2.2f}".format(self.tension_slider.value()))
self.tension_label.setAlignment(Qt.AlignCenter)
self.drift_polydegree_combobox_key = {
0: "Constant",
1: "Spline",
2: "1st order polynomial",
3: "2nd order polynomial",
4: "3rd order polynomial",
}
self.drift_polydegree_combobox = QtWidgets.QComboBox()
self.drift_polydegree_combobox.activated.connect(self.drift_combobox_updated)
for item in self.drift_polydegree_combobox_key.values():
self.drift_polydegree_combobox.addItem(item)
self.drift_cont_behaviorcombobox_key = {0: "Extrapolate", 1: "Constant"}
self.drift_cont_startendcombobox = QtWidgets.QComboBox()
self.drift_cont_startendcombobox.activated.connect(self.drift_combobox_updated)
for item in self.drift_cont_behaviorcombobox_key.values():
self.drift_cont_startendcombobox.addItem(item)
self.offset_slider = QtWidgets.QSlider(Qt.Horizontal)
self.offset_slider.setRange(0, 10)
self.offset_slider.setValue(0)
self.offset_slider.valueChanged.connect(self.plot_drift)
drift_controls = QtWidgets.QWidget()
drift_cont_control_layout = QtWidgets.QHBoxLayout()
drift_control_sublayout = QtWidgets.QVBoxLayout()
grid_widget = QtWidgets.QWidget()
grid = QtWidgets.QGridLayout()
grid.addWidget(QtWidgets.QLabel("Drift correction method"), 0, 0)
grid.addWidget(self.driftmethod_combobox, 0, 1)
grid.addWidget(QtWidgets.QLabel("Drift model type"), 1, 0)
grid.addWidget(self.drift_polydegree_combobox, 1, 1)
grid.addWidget(QtWidgets.QLabel("Behavior at start/end:"), 2, 0)
grid.addWidget(self.drift_cont_startendcombobox, 2, 1)
grid.addWidget(self.drift_screen_elapsed_time, 3, 0)
grid.addWidget(self.drift_time_spinner, 3, 1)
grid.addWidget(self.drift_plot_hz_extent, 4, 0)
grid.addWidget(self.drift_plot_weighted, 5, 0)
grid.addWidget(QtWidgets.QLabel("Vertical line offset"), 6, 0)
grid.addWidget(self.offset_slider, 6, 1)
grid.addWidget(QtWidgets.QLabel("Spline tension:"), 7, 0)
grid.addWidget(self.tension_slider, 7, 1)
grid.addWidget(self.tension_label, 7, 2)
grid_widget.setLayout(grid)
drift_control_sublayout.addWidget(grid_widget)
self.tare_view = QtWidgets.QTableView()
# self.tare_view.clicked.connect(self.update_tares)
self.tare_view.setContextMenuPolicy(Qt.CustomContextMenu)
self.tare_view.customContextMenuRequested.connect(self.tare_context_menu)
self.tare_view.setModel(TareTableModel())
#
# self.tare_proxy_model = QtCore.QSortFilterProxyModel(self)
# self.tare_view.setModel(self.tare_proxy_model)
# self.tare_view.setSortingEnabled(True)
self.tare_popup_menu = QtWidgets.QMenu("tare Popup Menu", self)
self.mnDeleteTare = QtWidgets.QAction("Delete tare", self)
self.mnDeleteTare.triggered.connect(self.parent.delete_tare)
lbl = QtWidgets.QLabel("Tares")
lbl.setFont(QtGui.QFont("Times", 11, QtGui.QFont.Bold))
lbl.setFixedHeight(30)
drift_control_sublayout.addItem(QtWidgets.QSpacerItem(40, 42))
drift_control_sublayout.addWidget(lbl)
drift_control_sublayout.addWidget(self.tare_view)
control_subwidget = QtWidgets.QWidget()
control_subwidget.setLayout(drift_control_sublayout)
drift_cont_control_layout.addWidget(control_subwidget)
drift_cont_control_layout.addStretch()
drift_controls.setLayout(drift_cont_control_layout)
drift_controls.setFixedWidth(500)
self.drift_cont_plotpanel.addWidget(self.drift_cont_canvastop)
self.drift_cont_plotpanel.addWidget(self.drift_cont_canvasbot)
self.drift_window.addWidget(self.drift_single_canvas)
self.drift_window.addWidget(self.drift_cont_plotpanel)
self.drift_window.addWidget(drift_controls)
self.drift_window.addWidget(QtWidgets.QWidget())
main_vsplitter_window.addWidget(self.drift_window)
self.drift_single_canvas.hide()
lbls = QtWidgets.QHBoxLayout()
lbl1 = QtWidgets.QLabel("Relative-gravity differences (delta-g's)", self)
lbls.addWidget(lbl1)
self.cont_label_widget.setLayout(lbls)
self.cont_label_widget.setFixedHeight(30)
main_vsplitter_window.addWidget(self.cont_label_widget)
self.cont_label_widget.hide()
self.roman_label_widget = QtWidgets.QWidget()
lbls = QtWidgets.QHBoxLayout()
lbl1 = QtWidgets.QLabel("Relative-gravity differences (delta-g's)", self)
lbl2 = QtWidgets.QLabel("Average gravity differences", self)
lbls.addWidget(lbl1)
lbls.addWidget(lbl2)
self.roman_label_widget.setLayout(lbls)
self.roman_label_widget.setFixedHeight(30)
main_vsplitter_window.addWidget(self.roman_label_widget)
self.roman_label_widget.hide()
# dg table (Roman method)
self.dg_samples_proxy_model = QtCore.QSortFilterProxyModel(self)
self.dg_samples_proxy_model.setSourceModel(SamplesTableModel())
self.dg_samples_view = QtWidgets.QTableView()
self.dg_samples_view.setModel(self.dg_samples_proxy_model)
self.dg_samples_view.setSortingEnabled(True)
# self.delta_proxy_model = QtCore.QSortFilterProxyModel(self)
# self.delta_view.setModel(self.delta_proxy_model)
# self.delta_view.setSortingEnabled(True)
self.delta_view.setModel(self.dg_avg_table)
main_hsplitter_window = QtWidgets.QSplitter(Qt.Horizontal, self)
main_hsplitter_window.addWidget(self.dg_samples_view)
main_hsplitter_window.addWidget(self.delta_view)
main_hsplitter_window.setMinimumHeight(300)
main_vsplitter_window.addWidget(main_hsplitter_window)
self.delta_view.show()
self.dg_samples_view.hide()
layout_main.addWidget(main_vsplitter_window)
self.setLayout(layout_main)
self.reset()
def reset(self):
self.driftmethod_combobox.setCurrentIndex(0)
self.drift_polydegree_combobox.setCurrentIndex(0)
self.axes_drift_single.cla()
self.axes_drift_cont_lower.clear()
self.axes_drift_cont_upper.clear()
self.axes_drift_cont_upper.figure.canvas.draw()
self.axes_drift_cont_lower.figure.canvas.draw()
self.axes_drift_single.figure.canvas.draw()
self.delta_view.setModel(DeltaTableModel())
self.dg_samples_view.model().sourceModel().init_data([])
self.tare_view.setModel(TareTableModel())
# This section provides the right-click context menu in the continuous drift lower plot - not implemented
# def drift_newpoint_picked(self, event):
# if event.button == 3:
# self.drift_rate_context_menu()
#
# def drift_point_picked(self, event):
# if event.mouseevent.button == 3:
# self.drift_rate_context_menu(from_pick=True)
#
# def drift_rate_context_menu(self, from_pick=False):
# """
# Not functional (other than showing the menu). Should allow points to be excluded, or artificial points added,
# to the continuous drift correction.
# :param from_pick: Boolean, True if a point was picked
# """
# if from_pick:
# add = QtWidgets.QAction(QtGui.QIcon(""), "Add point to drift model", self,
# triggered=self.drift_cont_addpoint,
# enabled=False)
# remove = QtWidgets.QAction(QtGui.QIcon(""), "Remove point from model", self,
# triggered=self.drift_cont_removepoint)
# self.popup_menu.addAction(remove)
# else:
# add = QtWidgets.QAction(QtGui.QIcon(""), "Add point to drift model", self,
# triggered=self.drift_cont_addpoint)
# remove = QtWidgets.QAction(QtGui.QIcon(""), "Remove point from model", self,
# triggered=self.drift_cont_removepoint,
# enabled=False)
#
# self.popup_menu.addAction(add)
# self.popup_menu.addAction(remove)
# cursor = QtGui.QCursor()
# self.popup_menu.popup(cursor.pos())
#
# def drift_cont_removepoint(self):
# pass
#
# def drift_cont_addpoint(self):
# pass
def time_extent_changed(self):
obstreeloop = self.parent.obsTreeModel.itemFromIndex(
self.parent.index_current_loop
)
hour = self.drift_time_spinner.dateTime().time().hour()
minute = self.drift_time_spinner.dateTime().time().minute()
obstreeloop.time_extent_time = (hour, minute)
obstreeloop.time_extent_check = self.drift_screen_elapsed_time.checkState()
self.parent.update_drift_tables_and_plots()
def show_line_label(self, event, axes):
"""
Shows the station name in the upper left of the drift plot when a
line is clicked.
Parameters
----------
event : Matplotlib event
axes : Current axes (differs for none|netadj|roman vs continuous)
"""
thisline = event.artist
if self.station_label is not None:
self.station_label.set_text("")
self.station_label = axes.text(
0.05,
0.95,
thisline.name,
horizontalalignment="center",
verticalalignment="center",
transform=axes.transAxes,
)
axes.figure.canvas.draw()
@staticmethod
def screen_for_elapsed_time(plot_data, elapsed_time):
"""
Excludes repeat observations with a long elapsed time between
occupations.
Parameters
----------
plot_data : list
List of lists with plot data
elapsed_time : int
Maximum time to be considered a repeat, in minutes
Returns
-------
List
list with same format as plot_data, missing the data that were excluded
"""
new_data = []
for line in plot_data:
x = [x for x in line[0]]
y = [y for y in line[1]]
new_x, new_y = [], []
i = 0
for i in range(1, len(x)):
x_diff = x[i] - x[i - 1]
if x_diff * 1440 < elapsed_time:
# Check that there's at least two points in the new line segment
if len(new_x) == 0:
new_x += [x[i - 1], x[i]]
new_y += [y[i - 1], y[i]]
elif abs(new_x[-1] - x[i - 1]) < 0.0001:
new_x.append(x[i])
new_y.append(y[i])
else:
new_data.append([new_x, new_y, line[2]])
new_x = [x[i - 1], x[i]]
new_y = [y[i - 1], y[i]]
if len(new_x) > 0:
new_data.append([new_x, new_y, line[2]])
return new_data
def update_weighted(self):
"""
Callback for weight drift observations
"""
obstreeloop = self.parent.obsTreeModel.itemFromIndex(
self.parent.index_current_loop
)
if obstreeloop:
obstreeloop.drift_cont_weighting = self.drift_plot_weighted.checkState()
self.drift_plot_weighted.update_drift_plots.emit()
def update_tension(self):
"""
Callback for spline tension slider
"""
self.tension_label.setText(str(self.tension_slider.value()))
model = self.plot_drift()
obstreeloop = self.parent.obsTreeModel.itemFromIndex(
self.parent.index_current_loop
)
self.update_delta_model(obstreeloop.drift_method, model)
self.parent.update_drift_tables_and_plots()
@staticmethod
def calc_none_dg(data, loop_name):
"""
Calculates delta-g's from successive gravity observations
Parameters
----------
data : list
list of stations from which to calculate delta-g
loop_name : str
Loop name, stored with each Delta object
Returns
-------
list
List of deltas
"""
deltas = []
# Take the first station from the list, or None if there aren't any.
prev_station = data.pop(0) if data else None
for station in data:
delta = DeltaNormal(prev_station, station, driftcorr=0.0, loop=loop_name)
deltas.insert(0, delta)
prev_station = station
return deltas
def calc_netadj_dg(self, data, loop_name):
"""
Calculates delta-g's from successive gravity observations
Parameters
----------
data : list
list of stations from which to calculate delta-g
loop_name : str
Stored with Delta object, used later in network adjustment
Returns
-------
list
List of deltas
"""
deltas = []
# Take the first station from the list, or None if there aren't any.
prev_station = data.pop(0) if data else None
for station in data:
delta = DeltaNormal(
prev_station,
station,
driftcorr="Adj.",
ls_drift=(loop_name, self.drift_polydegree_combobox.currentIndex() - 1),
loop=loop_name,
)
deltas.insert(0, delta)
prev_station = station
return deltas
@staticmethod
def calc_roman_dg(data, loop_name, time_threshold=None):
"""
Calculates delta-g between three station occupations (one station visited
once, one station visited twice) by interpolating drift at the latter station.
Accommodating the time threshold is tricky: for the plotting to be
correct, the initial g subtracted from each measurement has to vary.
Parameters
----------
data : list
List of stations
loop_name : str
Loop name, stored in the delta objects to be created
time_threshold : int, optional
Maximum elapsed time between repeat occupations, in minutes; None disables screening
Returns
-------
(sample_deltas, avg_deltas, vert_lines)
tuple with two lists of deltas (individual samples and per-pair averages) and
plot data for vertical lines
"""
# assumes stations in data are in chronological order
sample_deltas, vert_lines = drift_roman(data, loop_name, time_threshold)
# If there is more than one delta-g between a given station pair, average them
# Setup dict to store averages '(sta1, sta2)':[g]
avg_dg = dict()
unique_pairs = set()
for i, delta in enumerate(sample_deltas):
delta_key1 = (delta.station1.station_name, delta.station2[0].station_name)
delta_key2 = (delta.station2[0].station_name, delta.station1.station_name)
if delta_key1 not in unique_pairs and delta_key2 not in unique_pairs:
unique_pairs.add(delta_key1)
avg_dg[delta_key1] = [delta]
for ii in range(i + 1, len(sample_deltas)):
testdelta = sample_deltas[ii]
testdelta_key1 = (
testdelta.station1.station_name,
testdelta.station2[0].station_name,
)
testdelta_key2 = (
testdelta.station2[0].station_name,
testdelta.station1.station_name,
)
if delta_key1 == testdelta_key1 or delta_key1 == testdelta_key2:
avg_dg[delta_key1].append(testdelta)
avg_deltas = []
for station_pair in avg_dg.items():
avg_deltas.append(
DeltaList(None, station_pair[1], loop=loop_name, driftcorr="Roman")
)
return sample_deltas, avg_deltas, vert_lines
@staticmethod
def plot_tares(axes, obstreeloop):
"""
Plots a vertical line at the time of a tare
Parameters
----------
axes : Matpotlib Axes object
obstreeloop : ObsTreeLoop
"""
ylim = axes.get_ylim()
if len(obstreeloop.tares) > 0:
for tare in obstreeloop.tares:
x_time = tare.datetime
axes.plot([x_time, x_time], [ylim[0], ylim[1]], "gray")
axes.set_ylim(ylim)
axes.figure.canvas.draw()
def clear_axes(self):
"""
Clears plot axes
"""
self.axes_drift_single.cla()
self.axes_drift_cont_lower.clear()
self.axes_drift_cont_upper.clear()
self.drift_single_canvas.draw()
self.drift_cont_canvasbot.draw()
self.drift_cont_canvastop.draw()
def plot_drift(self, obstreeloop=None, update=True):
"""
Function to plot drift. Typically called from self.set_drift_method().
Parameters
----------
obstreeloop : ObsTreeLoop
Can either specify a loop, or by default use the currentLoopIndex.
update : bool
True if GUI elements should be updated.
(Better performance if they're only updated when visible)
"""
QtWidgets.QApplication.setOverrideCursor(Qt.WaitCursor)
offset = 0
if type(obstreeloop) is not ObsTreeLoop:
obstreeloop = self.parent.obsTreeModel.itemFromIndex(
self.parent.index_current_loop
)
obstreesurvey = obstreeloop.parent()
drift_type = obstreeloop.drift_method
plot_data = obstreeloop.get_data_for_plot()
self.parent.obsTreeModel.reset_assigned_sd()
# Check that there's station repeats. If there isn't, skip the plotting but
# we still want to calculate delta-g's (except for Roman correction).
no_data = True
if any([True for x in plot_data if len(x[0]) > 1]):
no_data = False
data = obstreeloop.checked_stations()
# Only include drift observations that meet time criteria
time_threshold = None
if self.drift_screen_elapsed_time.isChecked():
hour = self.drift_time_spinner.dateTime().time().hour()
minute = self.drift_time_spinner.dateTime().time().minute()
time_threshold = hour * 60 + minute
plot_data = self.screen_for_elapsed_time(
plot_data, elapsed_time=time_threshold
)
if drift_type == "none" or drift_type == "netadj":
# none, netadj, and roman all use axes_drift_single
deltas = None
if update:
self.axes_drift_single.cla()
logging.info("Plotting drift - no correction, Loop " + obstreeloop.name)
# Get data for plotting
for line in plot_data:
if len(line[0]) > 1:
# Make values relative to first station value
y = [f - line[1][0] + offset for f in line[1]]
x = [f for f in line[0]]
if update:
a = self.axes_drift_single.plot(x, y, ".-", picker=5)
a[0].name = line[2]
offset += self.offset_slider.value()
# Plot
if plot_data and not no_data and update:
self.axes_drift_single.xaxis.set_major_formatter(DateFormatter("%H:%M"))
self.axes_drift_single.yaxis.set_label_text(
"Change in gravity since initial \nstation occupation, "
+ "in microGal"
)
self.drift_fig.canvas.mpl_connect(
"pick_event",
lambda event: self.show_line_label(event, self.axes_drift_single),
)
self.plot_tares(self.axes_drift_single, obstreeloop)
elif update:
self.axes_drift_single.cla()
self.axes_drift_single.text(0.35, 0.5, "NO STATION REPEATS")
if update:
self.axes_drift_single.set_title(
"Survey " + obstreesurvey.name + ", Loop " + obstreeloop.name
)
self.drift_single_canvas.draw()
if drift_type == "none":
deltas = self.calc_none_dg(data, obstreeloop.name)
elif drift_type == "netadj":
deltas = self.calc_netadj_dg(data, obstreeloop.name)
QtWidgets.QApplication.restoreOverrideCursor()
elif drift_type == "continuous":
logging.info("Plotting continuous drift, Loop " + obstreeloop.name)
self.axes_drift_cont_lower.clear()
self.axes_drift_cont_upper.clear()
# Get data for plotting
min_time = 100000000
max_time = 0
drift_rate, drift_time, drift_x = [], [], []
for line in plot_data:
# x and y are the time and g values for each station.
# Make values relative to first station value
y = [f - line[1][0] + offset for f in line[1]]
x = [f for f in line[0]]
if min(x) < min_time:
min_time = min(x)
if max(x) > max_time:
max_time = max(x)
# Only plot lines with more than one station; min_time/max_time are
# still updated above so that singleton stations at the start or end
# of a survey are included when setting the drift-model time span.
if len(line[0]) > 1:
# Loop over the line vertices
for idx, obs in enumerate(y):
y[idx] = obs + offset
# get drift rate for bottom plot
if idx >= 1:
dr = (y[idx] - y[idx - 1]) / (
(x[idx] - x[idx - 1]) * 24
) # drift rate
drift_rate.append(dr)
xmean = np.mean([x[idx], x[idx - 1]])
drift_x.append(xmean)
# try:
drift_time.append(
dt.datetime.utcfromtimestamp(xmean * 86400.0)
)
# Plot horizontal extent
if self.drift_plot_hz_extent.isChecked() and update:
self.axes_drift_cont_lower.plot(
[x[idx], x[idx - 1]], [dr, dr], "-", color="0.5"
)
if update:
a = self.axes_drift_cont_upper.plot(x, y, ".-", picker=5)
a[0].name = line[2]
offset += self.offset_slider.value()
# Plot
if plot_data:
if update:
self.axes_drift_cont_upper.xaxis.set_major_formatter(
DateFormatter("%H:%M")
)
self.axes_drift_cont_lower.xaxis.set_major_formatter(
DateFormatter("%H:%M")
)
self.axes_drift_cont_lower.plot(
drift_time, drift_rate, ".", picker=2
)
xticks = self.axes_drift_cont_upper.get_xticks()
self.axes_drift_cont_lower.set_xticks(xticks)
xlims = self.axes_drift_cont_upper.get_xlim()
self.axes_drift_cont_lower.set_xlim(xlims)
self.axes_drift_cont_lower.yaxis.set_label_text(
"Drift rate,\nin microGal/hr"
)
self.axes_drift_cont_upper.yaxis.set_label_text(
"Drift, in microGal\n(arbitrary offset)"
)
self.drift_cont_figtop.canvas.mpl_connect(
"pick_event",
lambda event: self.show_line_label(
event, self.axes_drift_cont_upper
),
)
# drift_point_picked and drift_newpoint_picked are for
# adding/removing points to continuous drift curve - not yet
# implemented.
# self.drift_cont_figbot.canvas.mpl_connect(
# 'pick_event', self.drift_point_picked)
# self.drift_cont_figbot.canvas.mpl_connect(
# 'button_release_event', self.drift_newpoint_picked)
try:
z = []
deltas, xp, yp, z = drift_continuous(
data,
plot_data,
drift_x,
drift_rate,
self.drift_polydegree_combobox.currentIndex(),
self.tension_slider.value(),
self.drift_cont_startendcombobox.currentIndex(),
self.drift_plot_weighted.checkState(),
min_time,
max_time,
obstreeloop.name,
)
if update:
self.plot_tares(self.axes_drift_cont_lower, obstreeloop)
self.plot_tares(self.axes_drift_cont_upper, obstreeloop)
ln = self.axes_drift_cont_lower.plot(xp, yp, "k-")
if any(z):
textcolor = "k"
# type(z) = ndarray if constant drift
if len(z) == 1 and type(z[0]) is tuple:
mean_drift, sigma = z[0][0], z[0][1]
tstat = mean_drift / sigma
if (
np.abs(tstat) > 4.303
): # Critical value for 95% CI, 2 DOF, 2-tailed t-test
textcolor = "r"
z = [mean_drift]
format_str = {
1: "{:.2f} µGal/hr",
2: "{:.2f} µGal/hr per day",
3: "{:.2f}*t^2 {:+.2f}*t {:+.2f}",
4: "{:.2f}*t^3 {:+.2f}*t^2 {:+.2f}*t {:+.2f}",
}.get(len(z), "")
annot_text = format_str.format(*z)
annot = self.axes_drift_cont_lower.annotate(
annot_text,
xy=(737287, 45),
xytext=(-20, 20),
textcoords="offset points",
bbox=dict(boxstyle="round", fc="w"),
color=textcolor,
)
# arrowprops=dict(arrowstyle="->"))
annot.set_visible(False)
def update_annot(ind):
x, y = ln[0].get_data()
annot.xy = (x[ind["ind"][0]], y[ind["ind"][0]])
def hover(event):
vis = annot.get_visible()
if event.inaxes == self.axes_drift_cont_lower:
cont, ind = ln[0].contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
# fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
self.drift_cont_figbot.canvas.draw_idle()
self.drift_cont_figbot.canvas.mpl_connect(
"motion_notify_event", hover
)
self.axes_drift_cont_lower.set_ylim(
np.round(min(drift_rate), 0) - 5,
np.round(max(drift_rate), 0) + 5,
)
self.axes_drift_cont_upper.set_title(
"Survey "
+ obstreesurvey.name
+ ", Loop "
+ obstreeloop.name
)
self.drift_cont_canvasbot.draw()
self.drift_cont_canvastop.draw()
QtWidgets.QApplication.restoreOverrideCursor()
except IndexError as e:
if self.drift_polydegree_combobox.currentIndex() == 1:
MessageBox.warning(
"Error",
"Insufficient drift observations for spline method",
)
else:
MessageBox.warning("Unknown error", "Index error")
self.drift_polydegree_combobox.setCurrentIndex(0)
except np.linalg.LinAlgError as e:
logging.error(e)
MessageBox.warning(
"Error",
"Insufficient drift observations for polynomial method",
)
self.drift_polydegree_combobox.setCurrentIndex(0)
obstreeloop.drift_cont_method = 0
else:
MessageBox.warning("No data available for plotting", "Plot error")
# Plots vertical dashed lines showing delta-g's
elif drift_type == "roman":
logging.info("Plotting Roman drift, Loop " + obstreeloop.name)
if update:
self.axes_drift_single.cla()
deltas = self.calc_roman_dg(data, obstreeloop.name, time_threshold)
for line in plot_data:
if len(line[0]) > 1:
# Make values relative to first station value
y = [f - line[1][0] for f in line[1]]
x = [dt.datetime.utcfromtimestamp(f * 86400.0) for f in line[0]]
a = self.axes_drift_single.plot(x, y, ".-", picker=5)
a[0].name = line[2]
for line in deltas[2]:
if update:
self.axes_drift_single.plot(line[0], line[1], "--")
if plot_data and update:
self.axes_drift_single.xaxis.set_major_formatter(DateFormatter("%H:%M"))
if update:
self.axes_drift_single.yaxis.set_label_text(
"Change in gravity since initial \nstation occupation, "
+ "in microGal"
)
self.drift_fig.canvas.mpl_connect(
"pick_event",
lambda event: self.show_line_label(event, self.axes_drift_single),
)
self.axes_drift_single.set_title(
"Survey " + obstreesurvey.name + ", Loop " + obstreeloop.name
)
self.drift_single_canvas.draw()
QtWidgets.QApplication.restoreOverrideCursor()
QtWidgets.QApplication.restoreOverrideCursor()
return deltas
@staticmethod
def show_all_columns(view):
"""
Helper function to reset columns in a view
Parameters
----------
view : QTableView
"""
model = view.model()
for i in range(model.columnCount()):
view.showColumn(i)
def set_drift_method(self, update=True, update_adjust_tables=True):
"""
Called from update_drift_tables_and_plots + callback from GUI.
Initiates plotting on drift tab.
Parameters
----------
update : Boolean or int
Controls if plots are updated. For performance, it's set to false when
loading a file. It is an int when true because it's sent directly from a
callback.
update_adjust_tables : bool
We don't want to update the adjust tables when loading a workspace (we
want the deltas to be generated from the loop/station data)
"""
if (
self.parent.index_current_loop is None
): # Prevents crashing if no data are loaded
return
if type(update) is int:
update = True
obstreeloop = self.parent.obsTreeModel.itemFromIndex(
self.parent.index_current_loop
)
method_key = self.driftmethod_combobox.currentIndex()
inv_drift_lookup = {v: k for k, v in self.parent.drift_lookup.items()}
method = inv_drift_lookup[method_key]
logging.info("Drift method set to " + method)
obstreeloop.drift_method = method
# These control the visibility of different tables
# update is an int (index of menu item) when this function is called from the
# menu-item callback
if update:
width = self.drift_window.sizes()
if method == "continuous":
self.drift_polydegree_combobox.setCurrentIndex(
obstreeloop.drift_cont_method
)
self.drift_cont_startendcombobox.setCurrentIndex(
obstreeloop.drift_cont_startend
)
self.drift_plot_weighted.setCheckState(
obstreeloop.drift_cont_weighting
)
self.drift_screen_elapsed_time.setCheckState(
obstreeloop.time_extent_check
)
self.drift_time_spinner.setTime(
QtCore.QTime(*obstreeloop.time_extent_time)
)
self.drift_continuous()
else:
self.disable_weighted_checkbox()
if method == "none":
self.drift_none()
if method == "netadj":
self.drift_polydegree_combobox.setCurrentIndex(
obstreeloop.drift_netadj_method
)
self.drift_adjust()
if method == "roman":
self.drift_screen_elapsed_time.setCheckState(
obstreeloop.time_extent_check
)
self.drift_time_spinner.setTime(
QtCore.QTime(*obstreeloop.time_extent_time)
)
self.drift_roman()
self.set_width(width, method)
model = self.plot_drift(update=update)
self.update_delta_model(method, model)
# Don't want to update if only switching between loops
if update_adjust_tables:
self.update_deltas_on_adj_tab(obstreeloop)
self.parent.adjust_update_required()
# When loading a workspace, deltas[0] will be a dict, meaning
# we don't want to update the adjust tables at this point.
# Otherwise, proceed with the normal operation when the plots are updated:
try:
if (
type(
self.parent.obsTreeModel.itemFromIndex(
self.parent.index_current_survey
).deltas[0]
)
!= dict
):
self.parent.update_adjust_tables()
except IndexError:
pass
except TypeError:
self.parent.update_adjust_tables()
def update_deltas_on_adj_tab(self, obstreeloop):
"""
After creating/modifying deltas on the drift tab, this clears the deltas for
that loop from the adjust tab, and replaces them with the new deltas.
Parameters
----------
obstreeloop : ObsTreeLoop
"""
survey = obstreeloop.parent()
# Remove old deltas
try:
loop_present = obstreeloop.name in survey.loops_with_deltas()
if loop_present:
# Remove the old deltas that correspond to this loop
for delta in reversed(survey.deltas):
try:
if delta["loop"] == obstreeloop.name:
survey.deltas.remove(delta)
except TypeError:
if delta.loop == obstreeloop.name:
survey.deltas.remove(delta)
survey.deltas += obstreeloop.deltas
self.parent.set_adj_sd(
survey, survey.adjustment.adjustmentoptions, loop=obstreeloop
)
except TypeError: # No loops with deltas
pass
def set_width(self, width, method):
"""
Maintains relative width of plot windows when switching between
drift-correction methods.
Parameters
----------
width : list
Width of the elements in the middle window of the drift tab, in pixels
[single plot, double continuous plot, tools, spacer]
Either element 1 or 2 will be set to 0 depending on the drift correction
method.
method : {"none", "netadj", "roman", "continuous"}
Drift correction method, controls which plots are shown
"""
if all(w == 0 for w in width): # default is [0, 0, 0]
self.drift_window.setSizes([900, 0, 500, 2000])
return
if method == "none" or method == "netadj" or method == "roman":
# Order so larger of first two values is first.
width[:2] = sorted(width[:2], reverse=True)
else:
# Order so larger of first two values is last.
width[:2] = sorted(width[:2])
self.drift_window.setSizes(width)
def update_delta_model(self, method, model):
"""
Show appropriate delta model for the selected loop.
This method takes the model generated by plot_drift() and assigns it to
the delta_view on the drift tab.
Parameters
----------
method : {"none", "netadj", "roman", "continuous"}
If 'roman', show sample and average models. Otherwise, show a single model.
model : list or tuple
A tuple of lists (Roman method) or a list of deltas (other methods)
"""
obstreeloop = self.parent.obsTreeModel.itemFromIndex(
self.parent.index_current_loop
)
if model:
if method == "roman":
# Hide drift correction, std_for_adj, and residual columns
obstreeloop.deltas = model[1]
self.dg_samples_view.model().sourceModel().init_data(model[0])
self.delta_view.model().init_data(model[1])
self.show_all_columns(self.delta_view)
self.delta_view.hideColumn(2)
self.delta_view.hideColumn(5)
self.delta_view.hideColumn(7)
self.delta_view.hideColumn(8)
else:
obstreeloop.deltas = model
self.delta_view.model().init_data(model)
self.tare_view.model().init_data(obstreeloop.tares)
# Hide std_for_adj and residual columns
self.show_all_columns(self.delta_view)
self.delta_view.hideColumn(2)
self.delta_view.hideColumn(7)
self.delta_view.hideColumn(8)
def tare_context_menu(self, point):
"""
Right-click context menu on tare table
Parameters
----------
point : PyQt reference to click point
Determines where to show popup.
"""
selected = self.tare_view.selectedIndexes()
if selected:
self.tare_popup_menu.addAction(self.mnDeleteTare)
self.tare_popup_menu.exec_(self.tare_view.mapToGlobal(point))
def drift_adjust(self):
"""
Update which PyQt tables are shown
"""
self.drift_single_canvas.show()
self.drift_single_canvas.setMinimumWidth(700)
self.drift_cont_plotpanel.hide()
self.cont_label_widget.show()
self.cont_label_widget.setMinimumHeight(50)
self.roman_label_widget.hide()
self.drift_window.setMinimumHeight(200)
self.tension_slider.setEnabled(False)
# Disable the 'none' and 'spline' options, they're not relevant
self.drift_polydegree_combobox.model().item(0).setEnabled(False)
self.drift_polydegree_combobox.model().item(1).setEnabled(False)
self.drift_polydegree_combobox.setEnabled(True)
self.drift_cont_startendcombobox.setEnabled(False)
self.drift_plot_hz_extent.setEnabled(False)
self.drift_plot_weighted.setEnabled(False)
# self.delta_view.show()
self.dg_samples_view.hide()
def drift_roman(self):
"""
Update which PyQt tables are shown
"""
self.roman_label_widget.show()
self.drift_single_canvas.show()
self.drift_single_canvas.setMinimumWidth(700)
self.drift_cont_plotpanel.hide()
self.dg_samples_view.show()
self.cont_label_widget.hide()
self.roman_label_widget.show()
self.roman_label_widget.setMinimumHeight(50)
self.drift_window.setMinimumHeight(200)
self.tension_slider.setEnabled(False)
self.offset_slider.setEnabled(False)
self.drift_plot_hz_extent.setEnabled(False)
self.drift_plot_weighted.setEnabled(False)
self.drift_cont_startendcombobox.setEnabled(False)
self.drift_polydegree_combobox.setEnabled(False)
def drift_continuous(self):
"""
Update which PyQt tables are shown
"""
self.drift_single_canvas.hide()
self.drift_cont_plotpanel.show()
self.drift_cont_plotpanel.setMinimumWidth(700)
self.dg_samples_view.hide()
# Hide std_for_adj and residual columns
self.show_all_columns(self.delta_view)
self.delta_view.hideColumn(8)
self.delta_view.hideColumn(9)
self.cont_label_widget.show()
self.cont_label_widget.setMinimumHeight(50)
self.roman_label_widget.hide()
self.drift_window.setMinimumHeight(200)
# Re-enable these options (they're disabled if netadj drift was selected)
self.drift_polydegree_combobox.model().item(0).setEnabled(True)
self.drift_polydegree_combobox.model().item(1).setEnabled(True)
self.tension_slider.setEnabled(True)
self.offset_slider.setEnabled(True)
self.drift_plot_hz_extent.setEnabled(True)
self.drift_cont_startendcombobox.setEnabled(True)
self.drift_polydegree_combobox.setEnabled(True)
if self.drift_polydegree_combobox.currentIndex() == 0:
self.enable_weighted_checkbox()
else:
self.disable_weighted_checkbox()
def drift_none(self):
"""
Update which PyQt tables are shown
"""
self.drift_single_canvas.show()
self.drift_single_canvas.setMinimumWidth(700)
self.drift_cont_plotpanel.hide()
self.cont_label_widget.show()
self.cont_label_widget.setMinimumHeight(50)
self.roman_label_widget.hide()
self.drift_window.setMinimumHeight(200)
self.tension_slider.setEnabled(False)
self.offset_slider.setEnabled(True)
self.drift_polydegree_combobox.setEnabled(False)
self.drift_cont_startendcombobox.setEnabled(False)
self.drift_plot_hz_extent.setEnabled(False)
self.drift_plot_weighted.setEnabled(False)
self.dg_samples_view.hide()
def disable_weighted_checkbox(self):
self.drift_plot_weighted.setEnabled(False)
self.drift_plot_weighted.setToolTip(
"Weighted observations is only enabled when Continuous "
"model drift correction method and Constant drift model type are selected."
)
def enable_weighted_checkbox(self):
self.drift_plot_weighted.setEnabled(True)
self.drift_plot_weighted.setToolTip("")
def drift_combobox_updated(self):
"""
Called when either the drift poly degree or extrapolate/constant
combobox is changed.
"""
method_key = self.drift_polydegree_combobox.currentIndex()
startend_key = self.drift_cont_startendcombobox.currentIndex()
obstreeloop = self.parent.obsTreeModel.itemFromIndex(
self.parent.index_current_loop
)
drift_method = obstreeloop.drift_method
if drift_method == "continuous":
obstreeloop.drift_cont_method = method_key
obstreeloop.drift_cont_startend = startend_key
if method_key == 1:
self.tension_slider.setEnabled(True)
else:
self.tension_slider.setEnabled(False)
if method_key == 0:
self.enable_weighted_checkbox()
else:
self.disable_weighted_checkbox()
elif drift_method == "netadj":
obstreeloop.drift_netadj_method = method_key
self.parent.update_drift_tables_and_plots()
class CustomCheckBox(QtWidgets.QCheckBox):
def __init__(self, *args, **kwargs):
super(CustomCheckBox, self).__init__(*args, **kwargs)
update_drift_plots = QtCore.pyqtSignal()
class CustomComboBox(QtWidgets.QComboBox):
def __init__(self, *args, **kwargs):
super(CustomComboBox, self).__init__(*args, **kwargs)
update_drift_plots = QtCore.pyqtSignal()
```
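CustomCheckBox above attaches a class-level pyqtSignal so a checkbox can announce that the drift plots need refreshing. A minimal standalone sketch of that pattern; the widget name and the connected slot are illustrative, not part of GSadjust:
```python
from PyQt5 import QtCore, QtWidgets

class RefreshCheckBox(QtWidgets.QCheckBox):
    # Class-level signal, emitted manually when a refresh is needed.
    refresh_requested = QtCore.pyqtSignal()

if __name__ == "__main__":
    app = QtWidgets.QApplication([])
    box = RefreshCheckBox("Weight drift observations")
    box.refresh_requested.connect(lambda: print("refresh plots"))
    box.refresh_requested.emit()   # would normally be emitted from a GUI callback
```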
#### File: gsadjust/models/datum.py
```python
from PyQt5.QtCore import QAbstractTableModel, QModelIndex, Qt, QVariant, pyqtSignal
from .utils import format_numeric_column
# Constants for column headers
(
DATUM_STATION,
DATUM_DATE,
DATUM_G,
DATUM_SD,
N_SETS,
MEAS_HEIGHT,
GRADIENT,
DATUM_RESIDUAL,
) = range(8)
class tempStation:
def __init__(self, station):
self.__dict__ = station
# noinspection PyUnresolvedReferences
class DatumTableModel(QAbstractTableModel):
"""
Model to store Datums, shown on the adjust tab.
"""
_headers = { # As map, so do not need to be kept in order with the above.
DATUM_STATION: "Station",
DATUM_G: "g",
DATUM_SD: "Std. dev.",
DATUM_DATE: "Date",
MEAS_HEIGHT: "Meas. height",
GRADIENT: "Gradient",
DATUM_RESIDUAL: "Residual",
N_SETS: "# sets",
}
_attrs = { # From column constants to object attributes, for setting.
DATUM_STATION: ("station", str),
DATUM_G: ("g", float),
DATUM_SD: ("sd", float),
DATUM_DATE: ("date", lambda x: x), # pass through
MEAS_HEIGHT: ("meas_height", float),
GRADIENT: ("gradient", float),
}
signal_adjust_update_required = pyqtSignal()
signal_datum_table_updated = pyqtSignal()
def __init__(self):
super(DatumTableModel, self).__init__()
self._data = []
def headerData(self, section, orientation, role=Qt.DisplayRole):
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return QVariant(int(Qt.AlignLeft | Qt.AlignVCenter))
return QVariant(int(Qt.AlignRight | Qt.AlignVCenter))
if role == Qt.DisplayRole and orientation == Qt.Horizontal:
return self._headers.get(section, section + 1)
def insertRows(self, datum, position, rows=1, index=QModelIndex()):
self.beginInsertRows(QModelIndex(), position, position + rows - 1)
self._data.append(datum)
self.endInsertRows()
def removeRow(self, index):
datum = self.data(index, role=Qt.UserRole)
self.beginRemoveRows(index, index.row(), 1)
self._data.remove(datum)
self.endRemoveRows()
self.beginResetModel()
self.endResetModel()
def rowCount(self, parent=None):
return len(self._data)
def columnCount(self, parent=None):
return len(self._headers)
def data(self, index, role=Qt.DisplayRole):
if index.isValid():
datum = self._data[index.row()]
column = index.column()
if role == Qt.DisplayRole or role == Qt.EditRole:
# To accommodate old save files
def get_nsets():
try:
return datum.n_sets
except AttributeError:
return "NA"
fn, *args = {
DATUM_SD: (format, datum.sd, "0.2f"),
DATUM_G: (format, datum.g, "8.1f"),
DATUM_STATION: (str, datum.station),
DATUM_DATE: (str, datum.date),
MEAS_HEIGHT: (format, datum.meas_height, "0.2f"),
GRADIENT: (format, datum.gradient, "0.2f"),
DATUM_RESIDUAL: (format, datum.residual, "0.1f"),
N_SETS: (get_nsets,),
}.get(column, (format_numeric_column, column))
return fn(*args)
elif role == Qt.CheckStateRole:
# check status definition
if index.column() == 0:
return self.checkState(datum)
elif role == Qt.UserRole:
# check status definition
return datum
def checkState(self, datum):
"""
By default, everything is checked. If keepdata property from the
ChannelList object is 0, it is unchecked
"""
if datum.checked == 0:
return Qt.Unchecked
else:
return Qt.Checked
def setData(self, index, value, role):
"""
If a row is unchecked, update the keepdata value to 0 setData launched
when role is acting value is Qt.Checked or Qt.Unchecked
"""
if role == Qt.CheckStateRole and index.column() == 0:
datum = self._data[index.row()]
if value == Qt.Checked:
datum.checked = 2
elif value == Qt.Unchecked:
datum.checked = 0
self.dataChanged.emit(index, index, [])
self.signal_adjust_update_required.emit()
return True
if role == Qt.EditRole:
if index.isValid() and 0 <= index.row():
if value:
try:
datum = self._data[index.row()]
column = index.column()
# Ideally the other columns wouldn't be editable at all, but
# the user can select them and enter new values. Here we
# discard them unless they're in an editable column.
#
# Should be able to make non-editable columns read-only using a
# proxy model, e.g.
# https://stackoverflow.com/questions/22886912
if column in [
DATUM_STATION,
DATUM_DATE,
DATUM_G,
DATUM_SD,
MEAS_HEIGHT,
GRADIENT,
]:
attr, vartype = self._attrs.get(column, (None, None))
if attr:
setattr(datum, attr, vartype(value))
self.dataChanged.emit(index, index, [Qt.EditRole])
except ValueError:
pass
return True
if role == Qt.UserRole:
self._data[index.row()] = value
self.dataChanged.emit(index, index, [])
def flags(self, index):
return (
Qt.ItemIsUserCheckable
| Qt.ItemIsEnabled
| Qt.ItemIsSelectable
| Qt.ItemIsEditable
)
def clearDatums(self):
self.beginRemoveRows(self.index(0, 0), 0, self.rowCount())
self._data = []
self.endRemoveRows()
# The ResetModel calls are necessary to remove blank rows from the table view.
self.beginResetModel()
self.endResetModel()
return QVariant()
def datum_names(self):
dn = []
for datum in self._data:
dn.append(datum.station)
return dn
def init_data(self, data):
self.beginResetModel()
self._data = data
self.endResetModel()
self.layoutChanged.emit()  # Refresh whole view.
```
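A short sketch of attaching a model like DatumTableModel to a view; SimpleNamespace stands in for GSadjust's real Datum objects, the attribute values are invented, and DatumTableModel is assumed to be importable from the module above:
```python
from types import SimpleNamespace
from PyQt5 import QtWidgets

# Stand-in datum carrying the attributes DatumTableModel reads; values are illustrative.
datum = SimpleNamespace(
    station="STA1", date="2020-01-01", g=979123.4, sd=5.0,
    n_sets=3, meas_height=0.0, gradient=-3.0, residual=0.7, checked=2,
)

app = QtWidgets.QApplication([])
model = DatumTableModel()
model.init_data([datum])
view = QtWidgets.QTableView()
view.setModel(model)
view.show()
app.exec_()
```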
#### File: gsadjust/models/gravity.py
```python
import numpy as np
from PyQt5 import QtCore
from PyQt5.QtCore import Qt
class GravityChangeModel(QtCore.QAbstractTableModel):
"""
Model to store gravity change between surveys.
There is only one such model per campaign. Gravity change is calculated when the
respective menu item is chosen.
"""
def __init__(self, header, table, table_type="simple", parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
self._headers = {n: col for n, col in enumerate(header)}
self.createArrayData(table, table_type)
self.table_type = table_type
def createArrayData(self, table, table_type):
if table_type == "simple" or table_type == "list":
array = np.array(table).transpose()
elif table_type == "full":
array = np.array(table)
self.arraydata = array
def rowCount(self, parent=None):
return self.arraydata.shape[0]
def columnCount(self, parent=None):
return len(self._headers)
def flags(self, index):
return Qt.ItemIsEnabled | Qt.ItemIsSelectable
def data(self, index, role):
if not index.isValid():
return None
if role == Qt.DisplayRole:
row = index.row()
column = index.column()
try:
value = float(self.arraydata[row][column])
if self.table_type != 'full':
return format(value, "0.1f")
elif column != 1 and column != 2:
return format(value, "0.1f")
else:
return format(value, "0.5f")
except ValueError:
return str(self.arraydata[row][column])
def headerData(self, section, orientation, role=Qt.DisplayRole):
if role == Qt.DisplayRole and orientation == Qt.Horizontal:
return self._headers.get(section, section + 1)
```
#### File: gsadjust/obstree/base.py
```python
from PyQt5 import QtGui
from PyQt5.QtCore import Qt
class ObsTreeItemBase(QtGui.QStandardItem):
"""
Base tree-view item used to populate data tree, used for Surveys, Loops,
and Stations. Not used directly but inherited by ObsTreeStation, ...Loop,
and ...Survey
"""
def __init__(self):
super(ObsTreeItemBase, self).__init__()
self.setFlags(
self.flags() | Qt.ItemIsEnabled | Qt.ItemIsEditable | Qt.ItemIsUserCheckable
)
self.setCheckState(Qt.Checked)
self.fontweight = QtGui.QFont.Normal
self.cellcolor = Qt.white
```
#### File: gsadjust/plots/network.py
```python
import matplotlib
import networkx as nx
import numpy as np
from PyQt5 import QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as Toolbar
from ..gui.messages import MessageBox
class PlotNetworkGraph(QtWidgets.QDialog):
"""
Networkx plot of network. If shape == 'map', accurate coordinates must be
present in the input file.
Parameters
----------
survey
coords
shape : {'circular', 'map'}
"""
def __init__(self, survey, coords, shape="circular", parent=None):
super(PlotNetworkGraph, self).__init__(parent)
self.setWindowTitle("Network graph, Survey " + survey.name)
self.survey = survey
self.coords = coords
self.shape = shape
self.figure = matplotlib.figure.Figure()
self.canvas = FigureCanvas(self.figure)
self.toolbar = Toolbar(self.canvas, self)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
self.setLayout(layout)
self.plot_network()
def plot_network(self):
try:
edges, disabled_edge, datum_nodelist, nondatum_nodelist = self.get_data()
self.plot(edges, disabled_edge, datum_nodelist, nondatum_nodelist)
except KeyError as e:
MessageBox.warning(
"Plot error", "Error plotting network graph (Key error)",
)
def get_data(self):
edges = nx.MultiGraph()
disabled_edges = nx.MultiGraph()
datum_nodelist, nondatum_nodelist = [], []
deltas = self.survey.deltas
if len(deltas) == 0:
MessageBox.warning(
"Plot error",
"Delta table is empty. Unable to plot network graph",
)
else:
for i, delta in enumerate(deltas):
key = f"delta_{i}"
if delta.checked:
edges.add_edge(delta.sta1, delta.sta2, key=key)
else:
disabled_edges.add_edge(delta.sta1, delta.sta2, key=key)
datum_names = [datum.station for datum in self.survey.datums]
for station_name in [delta.sta1, delta.sta2]:
if station_name in datum_names:
if station_name not in datum_nodelist:
datum_nodelist.append(station_name)
continue
elif station_name not in nondatum_nodelist:
nondatum_nodelist.append(station_name)
edges.add_node(station_name)
disabled_edges.add_node(station_name)
return (edges, disabled_edges, datum_nodelist, nondatum_nodelist)
def format_map_axis(self, ax, shape):
if shape == "circular":
ax.set_xlim(-1.2, 1.2)
ax.set_ylim(-1.2, 1.2)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
elif shape == "map":
border = 0.1
self.figure.tight_layout()
xrange = np.abs(self.xmax - self.xmin)
yrange = np.abs(self.ymax - self.ymin)
ax.set_xlim(self.xmin - xrange * border, self.xmax + xrange * border)
ax.set_ylim(self.ymin - yrange * border, self.ymax + yrange * border)
ax.ticklabel_format(useOffset=False)
ax.set_xlabel("(Coordinates are not projected)")
def plot(self, edges, disabled_edges, datum_nodelist, nondatum_nodelist):
self.figure.clear()
H = nx.Graph(edges)
if self.shape == "circular":
pos = nx.circular_layout(H)
elif self.shape == "map":
pos = {}
for k, v in self.coords.items():
pos[k] = (v[0], v[1])
self.xmin = min([x[0] for x in pos.values()])
self.ymin = min([x[1] for x in pos.values()])
self.xmax = max([x[0] for x in pos.values()])
self.ymax = max([x[1] for x in pos.values()])
if not nx.is_connected(H):
gs = [H.subgraph(c) for c in nx.connected_components(H)]
for idx, g in enumerate(gs):
ax = self.figure.add_subplot(1, len(gs), idx + 1)
nx.draw_networkx_edges(
g, pos, ax=ax, width=1, alpha=0.4, node_size=0, edge_color="k"
)
nx.draw_networkx_nodes(
g, pos, ax=ax, node_color="w", alpha=0.4, with_labels=True
)
nx.draw_networkx_labels(g, pos, ax=ax, font_color="orange")
ax.set_title("Networks are disconnected!")
self.format_map_axis(ax, self.shape)
else:
# edge width is proportional to number of delta-g's
edgewidth = []
ax = self.figure.add_subplot(111)
for (u, v, d) in H.edges(data=True):
edgewidth.append(len(edges.get_edge_data(u, v)) * 2 - 1)
nx.draw_networkx_edges(
H, pos, ax=ax, width=edgewidth, alpha=0.4, node_size=0, edge_color="k"
)
nx.draw_networkx_edges(
disabled_edges,
pos,
ax=ax,
width=1,
alpha=0.4,
node_size=0,
edge_color="r",
)
nx.draw_networkx_nodes(
H,
pos,
ax=ax,
node_size=120,
nodelist=datum_nodelist,
node_color="k",
node_shape="^",
with_labels=True,
alpha=0.8,
)
nx.draw_networkx_nodes(
H,
pos,
ax=ax,
node_size=120,
nodelist=nondatum_nodelist,
node_color="k",
node_shape="o",
with_labels=True,
alpha=0.3,
)
nx.draw_networkx_labels(H, pos, ax=ax, font_color="r")
self.format_map_axis(ax, self.shape)
self.canvas.draw()
```
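The edge widths in the plot above scale with the number of delta-g observations between a station pair, counted from the MultiGraph's parallel edges via get_edge_data. A minimal sketch of that counting; the station names are invented:
```python
import networkx as nx

g = nx.MultiGraph()
g.add_edge("STA1", "STA2", key="delta_0")
g.add_edge("STA1", "STA2", key="delta_1")   # repeat observation of the same pair
g.add_edge("STA2", "STA3", key="delta_2")

simple = nx.Graph(g)                         # collapse parallel edges for plotting
for u, v in simple.edges():
    n_obs = len(g.get_edge_data(u, v))       # number of parallel edges (delta-g's)
    print(u, v, "width:", n_obs * 2 - 1)
```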
#### File: sgp-gsadjust/gsadjust/threads.py
```python
from PyQt5.QtCore import (
QObject,
QRunnable,
QThread,
pyqtSignal,
pyqtSlot,
)
class RunnerKilledException(Exception):
pass
class GenericSignals(QObject):
finished = pyqtSignal()
error = pyqtSignal(object)
data = pyqtSignal(object)
result = pyqtSignal(object)
class RunnerBase(QRunnable):
signals = GenericSignals()
def __init__(self, *args, **kwargs):
super().__init__()
self.args = args
self.kwargs = kwargs
self.is_paused = False
self.is_killed = False
@pyqtSlot()
def run(self):
raise NotImplementedError
def kill(self):
self.is_killed = True
def pause(self, pause=True):
self.is_paused = pause
class ThreadBase(QThread):
signals = GenericSignals()
def __init__(self, *args, **kwargs):
super().__init__()
self.args = args
self.kwargs = kwargs
self.is_paused = False
self.is_killed = False
@pyqtSlot()
def run(self):
raise NotImplementedError
def kill(self):
self.is_killed = True
def pause(self, pause=True):
self.is_paused = pause
```
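RunnerBase and ThreadBase leave run() abstract; a minimal subclass sketch showing the intended kill/pause checks. The counting loop is a placeholder, and RunnerBase and RunnerKilledException are assumed to be imported from the module above:
```python
import time
from PyQt5.QtCore import pyqtSlot

class CountingRunner(RunnerBase):
    @pyqtSlot()
    def run(self):
        try:
            for i in range(100):
                if self.is_killed:
                    raise RunnerKilledException()
                while self.is_paused:
                    time.sleep(0.05)        # wait until unpaused
                self.signals.data.emit(i)   # report progress to any connected slots
            self.signals.finished.emit()
        except RunnerKilledException:
            pass                            # exit quietly when killed
```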
#### File: sgp-gsadjust/gsadjust/utils.py
```python
def index_or_none(l, i):
if i not in l:
return None
return l.index(i)
def init_cal_coeff_dict(obstreemodel):
"""
Initiate dict for storing meter calibration coefficients.
Parameters
----------
obstreemodel : ObsTreeModel
Returns
-------
dict
key: Meter (str), value: float
"""
try:
meter_list = {}
for i in range(obstreemodel.invisibleRootItem().rowCount()):
survey = obstreemodel.invisibleRootItem().child(i)
for ii in range(survey.rowCount()):
loop = survey.child(ii)
if loop.meter not in meter_list:
meter_list[loop.meter] = 1.000
return meter_list
except Exception:
return None
def init_station_coords_dict(obstreemodel):
"""
Stores a single set of coordinates for each station with the obsTreeModel
object. The coordinates of the last
Station in the Survey > Loop > Station hierarchy will be used.
"""
station_coords = dict()
for i in range(obstreemodel.invisibleRootItem().rowCount()):
survey = obstreemodel.invisibleRootItem().child(i)
for ii in range(survey.rowCount()):
loop = survey.child(ii)
for iii in range(loop.rowCount()):
station = loop.child(iii)
try:
station_coords[station.station_name] = (
station.long[0],
station.lat[0],
station.elev[0],
)
except Exception:
station_coords[station.station_name] = (0, 0, 0)
return station_coords
```
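A trivial usage sketch of `index_or_none` (the station names below are made up): it returns the position of a value in a list, or `None` when the value is absent, instead of raising `ValueError` like `list.index`.
```python
stations = ['REGIONAL', 'CDOT', 'FARM']   # hypothetical station names
print(index_or_none(stations, 'CDOT'))    # 1
print(index_or_none(stations, 'XYZ'))     # None
```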
#### File: sgp-gsadjust/tests/test_gui.py
```python
import os
import numpy as np
import pytest
from PyQt5 import QtCore, QtWidgets
import gsadjust
from gsadjust.gui.dialogs import AddDatumFromList
import pytestqt
# from tide_correction import tide_correction_agnew
@pytest.mark.skipif("TRAVIS" in os.environ, reason="Doesn't work on Travis")
def test_gui(qtbot, monkeypatch):
# Not sure why, but need to store and restore the path after this test
pwd = os.getcwd()
window = gsadjust.GSadjust.MainProg()
# window.show()
qtbot.addWidget(window)
window.show()
qtbot.wait(1000)
# Open data
window.open_raw_data(r'.\tests\test_BurrisData2.txt', 'Burris')
window.init_gui()
assert window.obsTreeModel.rowCount() == 1
# Update tide correction
# Not checking that the correction is correct, just that it's been updated.
    # Burris meters only report the correction to the nearest microGal; this test will fail if the correction wasn't updated.
# tide_correction_agnew(window, 35.0, -110.0, 1000.0)
# assert (
# np.abs(
# window.obsTreeModel.invisibleRootItem().child(0).child(0).child(0).etc[0]
# + 20.6
# )
# < 0.01
# )
test_workspace = 'test1.gsa'
success = window.obsTreeModel.save_workspace(test_workspace)
assert success == 'test1.gsa'
# Divide into loops
window.divide_survey(8 / 24)
qtbot.wait(2000)
survey = window.obsTreeModel.invisibleRootItem().child(0)
loop = survey.child(0)
assert loop.rowCount() == 12
assert survey.rowCount() == 3
# Change to Drift tab
window.tab_widget.setCurrentIndex(1)
# Step through loops
window.activate_survey_or_loop(survey.child(1).index())
qtbot.wait(1000)
window.activate_survey_or_loop(survey.child(2).index())
qtbot.wait(1000)
window.activate_survey_or_loop(survey.child(0).index())
qtbot.wait(2000)
# Step through drift-correction styles
window.tab_drift.driftmethod_combobox.setCurrentIndex(1)
window.tab_drift.set_drift_method()
qtbot.wait(1000)
window.tab_drift.driftmethod_combobox.setCurrentIndex(2)
window.tab_drift.set_drift_method()
qtbot.wait(1000)
window.tab_drift.driftmethod_combobox.setCurrentIndex(3)
window.tab_drift.set_drift_method()
qtbot.wait(1000)
# window.tab_drift.driftmethod_combobox.setCurrentIndex(1)
# window.tab_drift.set_drift_method()
# qtbot.wait(1000)
window.tab_drift.drift_plot_weighted.setCheckState(QtCore.Qt.Checked)
window.tab_drift.set_drift_method()
qtbot.wait(1000)
window.tab_drift.drift_polydegree_combobox.setCurrentIndex(1)
window.tab_drift.drift_combobox_updated()
qtbot.wait(1000)
window.tab_drift.drift_polydegree_combobox.setCurrentIndex(2)
window.tab_drift.drift_combobox_updated()
qtbot.wait(1000)
window.tab_drift.drift_polydegree_combobox.setCurrentIndex(0)
window.tab_drift.drift_cont_startendcombobox.setCurrentIndex(1)
window.tab_drift.drift_combobox_updated()
qtbot.wait(3000)
window.tab_widget.setCurrentIndex(2)
# Populate delta and datum tables
qtbot.keyClick(window, 'a', modifier=QtCore.Qt.ControlModifier)
qtbot.wait(3000)
monkeypatch.setattr(
AddDatumFromList, 'add_datum', classmethod(lambda *args: 'CDOT')
)
adj_tab_model = window.tab_adjust.delta_view.model()
first_adj_tab_delta = adj_tab_model.data(
adj_tab_model.index(0, 4), QtCore.Qt.UserRole
)
drift_tab_model = window.tab_drift.delta_view.model()
first_drift_tab_delta = drift_tab_model.data(
drift_tab_model.index(0, 4), QtCore.Qt.UserRole
)
# TODO: these are sorted differently on the drift and NA tabs, so we can't just grab the first ones.
# assert first_adj_tab_delta.dg == first_drift_tab_delta.dg
# assert first_adj_tab_delta.driftcorr == first_drift_tab_delta.driftcorr
qtbot.keyClick(window, 'd', modifier=QtCore.Qt.ControlModifier)
window.adjust_network()
# Verify gravnet input
assert len(survey.results) == 30
assert (
len(survey.adjustment.results_string()) == 12
) # number of lines in Numpy output
assert survey.adjustment.adjustmentresults.n_deltas == 83
assert survey.adjustment.adjustmentresults.n_datums == 1
test_workspace = 'test1.gsa'
success = window.obsTreeModel.save_workspace(test_workspace)
assert success == 'test1.gsa'
window.workspace_clear(confirm=False)
assert window.obsTreeModel.rowCount() == 0
window.workspace_open_json(test_workspace)
survey = window.obsTreeModel.invisibleRootItem().child(0)
loop = survey.child(0)
assert loop.rowCount() == 12
assert survey.rowCount() == 3
window.menus.mnAdjGravnet.setChecked(True)
qtbot.wait(2000)
def on_timeout():
messagebox = QtWidgets.QApplication.activeWindow()
qtbot.keyClick(messagebox, QtCore.Qt.Key_Enter)
QtCore.QTimer.singleShot(3000, on_timeout)
window.adjust_network()
# qtbot.keyClick(messagebox, QtCore.Qt.Key_Enter)
window.menus.mnAdjPyLSQ.setChecked(True)
window.adjust_network()
for line in survey.adjustment.results_string():
elems = line.split(' ')
if elems[0] == 'SD':
sd0 = float(elems[-1])
# Disable some observations, save workspace, clear and reload, verify that we get the same adjustment results
rows = [1, 3, 5]
for row in rows:
survey.deltas[row].checked = 0
window.adjust_network()
for line in survey.adjustment.results_string():
elems = line.split(' ')
if elems[0] == 'SD':
sd1 = float(elems[-1])
# Adjustment results should be different with some observations disabled
assert abs(sd0 - sd1) > 0.01
QtCore.QTimer.singleShot(1000, on_timeout)
success = window.workspace_save()
    assert success is True
qtbot.wait(2000)
window.workspace_clear(confirm=False)
assert window.obsTreeModel.rowCount() == 0
window.workspace_open_json(test_workspace)
window.adjust_network()
for line in survey.adjustment.results_string():
elems = line.split(' ')
if elems[0] == 'SD':
sd2 = float(elems[-1])
assert abs(sd1 - sd2) < 0.000001
os.remove(test_workspace)
# window.workspace_clear(confirm=False)
# window.workspace_open_json('./tests/test_workspace1.gsa')
# window.workspace_append()
# qtbot.keyPress(window, QtCore.Qt.Key_Tab, QtCore.Qt.ControlModifier)
# qtbot.mouseClick(options_dialog.ok_button, QtCore.Qt.LeftButton, delay=1000)
window.close()
os.chdir(pwd)
```
#### File: sgp-gsadjust/tests/test_import.py
```python
import pytest
import os
import gsadjust
from gsadjust.file.read import read_csv, read_cg6, read_cg6tsoft
def test_import_csv():
data_file = os.path.join(
os.getcwd(), 'test_data', 'field', 'CG-6', 'CG-6_TestData.dat'
)
with pytest.raises(IndexError):
with open(data_file, 'r') as fh:
data = read_csv(fh)
data_file = os.path.join(os.getcwd(), 'tests', 'test_csv_data.csv')
with open(data_file, 'r') as fh:
data = read_csv(fh)
assert len(data.raw_grav) == 95
def test_import_CG6():
data_file = os.path.join(
os.getcwd(), 'test_data', 'field', 'CG-6', 'CG-6_TestData.dat'
)
with open(data_file, 'r') as fh:
data = read_cg6(fh)
assert len(data.raw_grav) == 43
data_file = os.path.join(os.getcwd(), 'tests','test_csv_data.csv')
with pytest.raises(IndexError):
with open(data_file, 'r') as fh:
data = read_cg6(fh)
def test_import_CG6tsoft():
data_file = os.path.join(
os.getcwd(), 'test_data', 'field', 'CG-6', 'CG-6_TsoftFormat.DAT'
)
with open(data_file, 'r') as fh:
data = read_cg6tsoft(fh)
assert len(data.raw_grav) == 5515
data_file = os.path.join(os.getcwd(), 'tests', 'test_csv_data.csv')
with pytest.raises(ValueError):
with open(data_file, 'r') as fh:
data = read_cg6tsoft(fh)
```
#### File: sgp-gsadjust/tests/test_inversion.py
```python
import sys, os
import pytest
def test_numpy_inversion(obstreesurvey):
import numpy as np
# Test problem from Adjustment Computations Spatial Data Analysis, Ghilani and Wolf, Wiley
# ftp://doc.nit.ac.ir/civil/m.abbaszadeh/Theory%20of%20Errors%20and%20Adjustment/
# ebooksclub.org__Adjustment_Computations__Spatial_Data_Analysis.pdf
A = [[1, 0, 0], [-1, 1, 0], [0, -1, 1], [0, 0, -1], [-1, 0, 1], [0, 1, 0]]
obstreesurvey.adjustment.A = np.array(A)
P = [
[1 / 0.006 ** 2, 0, 0, 0, 0, 0],
[0, 1 / 0.004 ** 2, 0, 0, 0, 0],
[0, 0, 1 / 0.005 ** 2, 0, 0, 0],
[0, 0, 0, 1 / 0.003 ** 2, 0, 0],
[0, 0, 0, 0, 1 / 0.004 ** 2, 0],
[0, 0, 0, 0, 0, 1 / 0.012 ** 2],
]
obstreesurvey.adjustment.P = np.array(P)
obs = [448.105, 5.360, -8.523, -444.944, -3.167, 453.477]
obstreesurvey.adjustment.Obs = obs
obstreesurvey.adjustment.dof = 3
obstreesurvey.adjustment.python_lsq_inversion()
answer = np.array([448.1087, 453.4685, 444.9436])
answer_sd = 0.6575
diff = answer - obstreesurvey.adjustment.X
assert max(abs(diff)) < 0.0001
assert obstreesurvey.adjustment.SDaposteriori - answer_sd < 0.0001
``` |
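The expected values in this test can be cross-checked independently with the standard weighted least-squares solution X = (AᵀPA)⁻¹AᵀPL, which `python_lsq_inversion` presumably implements; the sketch below is only a sanity check, not project code.
```python
import numpy as np

A = np.array([[1, 0, 0], [-1, 1, 0], [0, -1, 1],
              [0, 0, -1], [-1, 0, 1], [0, 1, 0]], dtype=float)
P = np.diag([1 / 0.006 ** 2, 1 / 0.004 ** 2, 1 / 0.005 ** 2,
             1 / 0.003 ** 2, 1 / 0.004 ** 2, 1 / 0.012 ** 2])
L = np.array([448.105, 5.360, -8.523, -444.944, -3.167, 453.477])

# Weighted normal equations: (A^T P A) X = A^T P L
X = np.linalg.solve(A.T @ P @ A, A.T @ P @ L)
print(np.round(X, 4))   # expected to be close to [448.1087, 453.4685, 444.9436]
```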
{
"source": "jkennedy-usgs/sgp-utils",
"score": 3
} |
#### File: sgp-utils/ingestor/cosmos.py
```python
import pandas as pd
from PyQt5.QtWidgets import QMessageBox
DIST_CRITERIA = 0.002
SPEED_CRITERIA = 0.5
class CR_data:
cosmos_fields = ['RecordNum', 'Date Time(UTC)', 'PTB110_mb', 'P4_mb', 'P1_mb', 'T1_C', 'RH1', 'T_CS215', 'RH_CS215',
'Vbat', 'N1Cts', 'N2Cts', 'N1ETsec ', 'N2ETsec ', 'N1T(C)', 'N1RH ', 'N2T(C)', 'N2RH',
'GpsUTC', 'LatDec', 'LongDec', 'Alt', 'Qual', 'NumSats', 'HDOP', 'Speed_kmh', 'COG',
'SpeedQuality', 'strDate']
def __init__(self):
super(CR_data, self).__init__()
self.df = None
self.file = None
def load_data_from_file(self, fname):
self.file = fname
exclude_cols = [1, len(self.cosmos_fields)-2]
data = []
try:
fid = open(fname, 'r')
except IOError:
QMessageBox.critical(None, 'File error', 'Error opening COSMOS file')
return False
else:
with fid:
for line in fid:
if line[:2] != '//':
data_elements = line.split(',')
try:
d = [float(x.strip()) if idx not in exclude_cols else x for idx, x in enumerate(data_elements)]
data.append(d)
                        except ValueError:  # skip header/metadata lines that can't be parsed as numeric data
continue
df = pd.DataFrame(data, columns=self.cosmos_fields)
df['dist'] = (df['LatDec'].diff() ** 2 + df['LongDec'].diff() ** 2).pow(1. / 2)
df['dt'] = pd.to_datetime(df['Date Time(UTC)'])
df['Vbat'] = df['Vbat'].map('{:,.1f}'.format)
df['LatDec'] = df['LatDec'].map('{:,.5f}'.format)
self.df = df
def split_data(self):
        # Criteria for grouping rows into an occupation:
        #   time since previous log entry == 5 min (continuous logging)
        #   dist < DIST_CRITERIA (instrument not moving)
        #   (speed criterion removed; see note below)
df = self.df
data = list()
durations = list()
first, last = 1, 1
for i in range(1, len(self.df)):
if df['dist'][i] < DIST_CRITERIA and \
(df['dt'][i] - df['dt'][i - 1]).seconds == 300:
# REMOVED:
                # df['Speed_kmh'][i] < SPEED_CRITERIA and \ # It's possible to have two stations separated by less
# than a 5 minute drive (speed would be 0 for two consecutive log entries)
last += 1
else:
if last - first > 1:
data.append(CR_occupation(df[first:last], self.file))
                    durations.append(last - first)  # number of log entries in the occupation
first, last = i + 1, i + 1
continue
return data
class CR_occupation:
def __init__(self, df, file):
super(CR_occupation, self).__init__()
self.df = df
self.file = file
@property
def dtime(self):
# Return mean observation time
dt = self.df['dt']
m = self.df['dt'].min()
return (m + (dt - m).mean()).to_pydatetime()
@property
def duration(self):
return len(self.df)
if __name__ == '__main__':
fname = '.\\test_data\\test.dat'
cr_data = CR_data()
cr_data.load_data_from_file(fname)
occupations = cr_data.split_data()
    jeff = 1  # no-op placeholder (convenient spot for a debugger breakpoint)
```
#### File: sgp-utils/sgp-utils/fg5_SY_plot.py
```python
import numpy as np
import pylab as plt
import datetime
from tkinter import filedialog
from tkinter import Tk
import matplotlib.dates as mdates
import matplotlib.ticker as tkr
import csv
import os
from nwis import nwis_get_data
from dateutil import parser
# # When saved, this exports fonts as fonts instead of paths:
plt.rcParams['svg.fonttype'] = 'none'
plt.interactive(False)
# Parameters that can be changed
presentation_style = True # Makes labels big
cross_ref_file = 'SiteIDcrossref.csv'
threshold = datetime.timedelta(days=5) # If within a threshold, just take the nearest data point
interpolate_threshold = datetime.timedelta(days=50)  # Otherwise, interpolate if the data gap is below a threshold
write_output_to_file = False
# Formats y-axis labels
def func(x, pos):
s = '{:0,d}'.format(int(x))
return s
def write_to_file(filesavename, station, g_date, g, gwl, code, gap, y1, y2):
with open(filesavename, 'a') as fn:
fn.write('{},{},{},{},{},{},{},{}\n'.format(station, g_date, g, gwl, code, gap, y1, y2))
# Open dialog to specify input file. Alternatively, specify file directly.
root = Tk()
root.withdraw()
data_file = filedialog.askopenfilename(title="Select text file to plot (from A10_parse.py)")
if write_output_to_file:
out_file = data_file[:-4]
filesavename = str.replace(str(data_file), '.txt', '_SY.csv')
with open(filesavename, 'w+') as fn:
fn.write('Station,date,g,gwl,type,gap,start,end\n')
# data_file = "SanPedro_qaqc.txt"
# Matplotlib interactive mode
plt.ion()
stations = []
myFmt = mdates.DateFormatter('%Y')
y_format = tkr.FuncFormatter(func)
# Get station list and column numbers from input file header
with open(data_file) as fp:
a = fp.readline()
a = a.strip()
tags = a.split("\t")
date_col = tags.index("Date")
sta_col = tags.index("Station Name")
grav_col = tags.index("Gravity")
for line in fp:
a = line.split("\t")
stations.append(a[sta_col])
# Remove duplicates
stations = list(set(stations))
# Initialize blank array to hold data. First array of each list element is date, second is gravity.
grav_data = [[[], []]]
nwis_data = ['']*len(stations)
for i in range(len(stations)-1):
grav_data.append([[], []])
# Retrieve data from nwis (will return both discrete and continuous data)
for station in stations:
sta_index = stations.index(station)
nwis_data[sta_index] = (nwis_get_data(cross_ref_file, station))
if nwis_data[sta_index] != 0:
nwis_data[sta_index]['station'] = station
# Get gravity data from input file
with open(data_file) as fp:
a = fp.readline()
for line in fp:
a = line.split("\t")
sta = a[sta_col]
sta_index = stations.index(sta)
# using the dateutil parser we can plot dates directly
grav_data[sta_index][0].append(parser.parse(a[date_col]))
grav_data[sta_index][1].append(float(a[grav_col]))
for idx, sta in enumerate(nwis_data):
if sta != 0: # Could be blank station names?
if sta['continuous_x'] or sta['discrete_x']: # Make sure there's some data
plot_x, plot_y = [], []
min_delta_cont, min_delta_disc = datetime.timedelta(days=1000000), datetime.timedelta(days=1000000)
# Iterate through the gravity values for a given station
for g_idx, g_date in enumerate(grav_data[idx][0]):
# find closest continuous data
if sta['continuous_x']:
repdate = np.repeat(g_date, len(sta['continuous_x'])) # vector of gravity-meas. dates
delta_cont = np.asarray(sta['continuous_x']) - repdate # vector of time-deltas
min_delta_cont = min(np.absolute(delta_cont))
idx_cont = np.argmin(np.absolute(delta_cont)) # index of gw level closest to gravity meas
# and closest discrete data
if sta['discrete_x']:
repdate = np.repeat(g_date, len(sta['discrete_x']))
delta_disc = np.asarray(sta['discrete_x']) - repdate
min_delta_disc = min(np.absolute(delta_disc))
idx_disc = np.argmin(np.absolute(delta_disc))
# check threshold
if min_delta_cont < threshold or min_delta_disc < threshold:
if min_delta_cont < min_delta_disc:
plot_x.append(sta['continuous_y'][idx_cont])
plot_y.append(grav_data[idx][1][g_idx])
if write_output_to_file:
write_to_file(filesavename, sta['station'], g_date, plot_y[-1], plot_x[-1], 'C', min_delta_cont.days, '0', '0')
elif min_delta_cont > min_delta_disc:
plot_x.append(sta['discrete_y'][idx_disc])
plot_y.append(grav_data[idx][1][g_idx])
if write_output_to_file:
write_to_file(filesavename, sta['station'], g_date, plot_y[-1], plot_x[-1], 'D', min_delta_disc.days, '0', '0')
continue
else: # No water-level measurements are very close. Check if we can interpolate.
interpolate = False
x1, x2, y1, y2 = [], [], [], []
cont_gap, disc_gap = datetime.timedelta(days=1000000), datetime.timedelta(days=1000000)
if sta['continuous_x']: # calculate continuous gap
if any(i < datetime.timedelta(days=0) for i in delta_cont) and \
any(i > datetime.timedelta(days=0) for i in delta_cont): # Check if data on both sides of gap
closest_neg = max([i for i in delta_cont if i <= datetime.timedelta(days=0)]) # time delta to closest negative diff
closest_pos = min([i for i in delta_cont if i >= datetime.timedelta(days=0)])
idx_closest_neg_cont, = np.nonzero(delta_cont == closest_neg)[0]
idx_closest_pos_cont, = np.nonzero(delta_cont == closest_pos)[0]
cont_gap = np.absolute(closest_neg) + closest_pos
if sta['discrete_x']:
if any(i < datetime.timedelta(days=0) for i in delta_disc) and \
any(i > datetime.timedelta(days=0) for i in delta_disc): # Check if data on both sides of gap
closest_neg = max([i for i in delta_disc if i <= datetime.timedelta(days=0)]) # time delta to closest negative diff
closest_pos = min([i for i in delta_disc if i >= datetime.timedelta(days=0)])
idx_closest_neg_disc, = np.nonzero(delta_disc == closest_neg)[0]
idx_closest_pos_disc, = np.nonzero(delta_disc == closest_pos)[0]
disc_gap = np.absolute(closest_neg) + closest_pos
if cont_gap < disc_gap: # interpolate the data type with the smaller gap
if cont_gap < interpolate_threshold:
x1 = sta['continuous_x'][idx_closest_neg_cont]
x2 = sta['continuous_x'][idx_closest_pos_cont]
y1 = sta['continuous_y'][idx_closest_neg_cont]
y2 = sta['continuous_y'][idx_closest_pos_cont]
interpolate = True
gap = cont_gap
elif disc_gap < cont_gap:
if disc_gap < interpolate_threshold:
x1 = sta['discrete_x'][idx_closest_neg_disc]
x2 = sta['discrete_x'][idx_closest_pos_disc]
y1 = sta['discrete_y'][idx_closest_neg_disc]
y2 = sta['discrete_y'][idx_closest_pos_disc]
interpolate = True
gap = disc_gap
if interpolate:
x1 = x1.toordinal()
x2 = x2.toordinal()
poly = np.polyfit([x1, x2], [y1, y2], 1)
p = np.poly1d(poly)
                    interpolated_dtw = p(g_date.toordinal())
plot_x.append(interpolated_dtw)
plot_y.append(grav_data[idx][1][g_idx])
print('Interpolated DTW at station {}, measurement on {}'.format(sta['station'], g_date))
print('Time gap = {}, WL change {} feet.'.format(gap, y2 - y1))
if write_output_to_file:
write_to_file(filesavename, sta['station'], g_date, plot_y[-1], plot_x[-1], 'I', gap.days, y1, y2)
else:
if write_output_to_file:
write_to_file(filesavename, sta['station'], g_date, grav_data[idx][1][g_idx], '0', 'N', '0', '0', '0')
print('no valid data to interpolate at station {}, measurement on {}'.format(sta['station'], g_date))
if len(plot_y) > 1: # If there's only 1 data point, don't bother
if presentation_style:
font = {'family': 'normal',
'weight': 'bold',
'size': 16}
plt.rc('font', **font)
plt.subplots_adjust(bottom=0.15, top=0.85, hspace=0.4, left=0.25, right=0.85)
            plot_y = [(y - plot_y[0]) / 41.9 for y in plot_y]     # gravity change (microGal) to meters of free-standing water
            plot_x = [(x - plot_x[0]) * -.3048 for x in plot_x]   # depth-to-water change (feet) to meters, sign flipped so a rising water level plots as positive
try: # Sometimes polyfit fails, even if there's 3 points?
poly, cov = np.polyfit(plot_x, plot_y, 1, cov=True)
cc = np.corrcoef(plot_x, plot_y)[0,1]
line_x = np.linspace(min(plot_x)-0.2, max(plot_x)+0.2,10)
p = np.poly1d(poly)
line_y = p(line_x)
plt.figure(facecolor='white')
plt.plot(plot_x, plot_y,'.')
plt.plot(line_x,line_y)
plt.title(sta['station'])
ax = plt.gca()
plt.ylabel('Change in water storage\n(meters of free-standing water, from gravity data)')
plt.xlabel('Change in groundwater level (meters)')
plt.figtext(0.25, 0.85, 'Sy = %0.2f ± %0.02f' % (poly[0], np.sqrt(cov[0,0])))
                plt.figtext(0.25, 0.81, 'r^2 = %0.2f' % (cc ** 2))
plt.savefig(sta['station'] + '.svg')
plt.show()
except ValueError as e:
print(e)
# This keeps the figure windows open until the user closes them:
input()
```
#### File: sgp-utils/sgp-utils/fg5_WL_plot.py
```python
from numpy import mod, ceil
import matplotlib.pylab as plt
from dateutil import parser
import datetime
from tkinter import filedialog
from tkinter import Tk
import matplotlib.dates as mdates
import matplotlib.ticker as tkr
from nwis import nwis_get_data
# Parameters and default values:
a10_sd = 5 # Default A-10 standard deviation, for error bars
convert_to_water = True # Converts gravity change to thickness-of-water change (41.9 microGal/m)
consistent_date_axes = True # Causes all plots to have the same time span (set by x_min, x_max)
cross_ref_file = 'SiteIDcrossref.csv' # File with gravity station names and corresponding 15-digit USGS ID
figs_per_page = 4 # plots per page
meters = True # Use meters or feet
# specify x-axis limits, instead of taking them from the data. If only gravity data are present (no water levels), the
# date range will be taken from the gravity data.
if consistent_date_axes:
x_min = datetime.datetime(2016,1,1)
x_max = datetime.datetime(2021,1,1)
# Formats y-axis labels
def func(x, pos):
s = '{}'.format(x)
return s
# Value to subtract from observed gravity so the plotted values are reasonable
offset = 978990000
# Open dialog to specify input file. Alternatively, specify file directly.
data_file = filedialog.askopenfilename(title="Select text file to plot (from A10_parse.py)")
# data_file = "SanPedro_qaqc.txt"
# Matplotlib interactive mode
plt.ioff()
stations = []
myFmt = mdates.DateFormatter('%Y')
y_format = tkr.FuncFormatter(func)
# Get station list and column numbers
with open(data_file) as fp:
a = fp.readline()
a = a.strip()
tags = a.split("\t")
date_col = tags.index("Date")
sta_col = tags.index("Station Name")
grav_col = tags.index("Gravity")
for line in fp:
a = line.split("\t")
stations.append(a[sta_col])
stations = list(set(stations))
# Initialize blank array to hold data. First array of each list element is date, second is gravity.
grav_data = [[[], []]]
nwis_data = ['']*len(stations)
for i in range(len(stations)-1):
grav_data.append([[], []])
for station in stations:
sta_index = stations.index(station)
nwis_data[sta_index] = (nwis_get_data(cross_ref_file, station))
# Get gravity data from input file
with open(data_file) as fp:
a = fp.readline()
for line in fp:
a = line.split("\t")
sta = a[sta_col]
sta_index = stations.index(sta)
# using the dateutil parser we can plot dates directly
grav_data[sta_index][0].append(parser.parse(a[date_col]))
grav_data[sta_index][1].append(float(a[grav_col]) - offset)
figidx = 1
i = 0
plt.figure(figsize=(8.5, 11))
while i < len(grav_data):
if nwis_data[i]:
if figidx >= (figs_per_page + 1):
plt.figure(figsize=(8.5, 11))
figidx = 1
        plt.subplot(figs_per_page, 1, figidx)
grav_x = grav_data[i][0]
grav_y = grav_data[i][1]
ytemp = []
y0 = grav_y[0]
if convert_to_water:
if meters:
ytemp = [(p-y0) / 41.9 for p in grav_y]
a10sd = a10_sd / 41.9
else:
ytemp = [(p-y0) / 12.77 for p in grav_y]
a10sd = a10_sd / 12.77
rng = max(ytemp) - min(ytemp)
half_rng = ceil(rng)
else:
ytemp = [(p-y0) for p in grav_y]
a10sd = a10_sd
grav_y = ytemp
plt.errorbar(grav_x, grav_y, yerr=a10sd, fmt='kd')
ax = plt.gca()
if convert_to_water:
ax.set_ylim(0-half_rng, 0+half_rng)
ax2 = ax.twinx()
if nwis_data[i]['continuous_x']:
nwis_x = nwis_data[i]['continuous_x']
nwis_y = nwis_data[i]['continuous_y']
else:
nwis_x = nwis_data[i]['discrete_x']
nwis_y = nwis_data[i]['discrete_y']
if meters: # NWIS default is feet
nwis_y = [meas * .3048 for meas in nwis_y]
ax2.plot(nwis_x, nwis_y)
ax2.invert_yaxis()
# Remove scientific notation from axes labels
# ax.yaxis.get_major_formatter().set_useOffset(False)
# Add commas to y-axis tick mark labels
ax.yaxis.set_major_formatter(y_format)
# Set x-axis tick labels to just show year
ax.xaxis.set_major_formatter(myFmt)
if not consistent_date_axes:
# Adjust ticks so they fall on Jan 1 and extend past the range of the data. If there
# are data in January and December, add another year so that there is plenty of space.
start_month = grav_data[i][0][0].month
start_year = grav_data[i][0][0].year
end_month = grav_data[i][0][-1].month
end_year = grav_data[i][0][-1].year
if start_month == 1:
start_year = start_year-1
if end_month == 12:
end_year = end_year + 1
xticks = []
for iii in range(start_year,end_year+2):
xticks.append(datetime.datetime(iii,1,1))
ax.set_xticks(xticks)
else:
ax.set_xlim(x_min, x_max)
if convert_to_water:
if meters:
ax.set_ylabel('Storage change,\nin m of water')
else:
ax.set_ylabel('Storage change,\nin ft of water')
else:
ax.set_ylabel('Gravity change,\nin microGal')
if meters:
ax2.set_ylabel('Depth to\ngroundwater, m')
else:
ax2.set_ylabel('Depth to\ngroundwater, ft')
plt.title(stations[i])
plt.draw()
plt.subplots_adjust(bottom=0.25, hspace=0.4, left=0.25, right=0.85)
# When saved, this exports fonts as fonts instead of paths:
plt.rcParams['svg.fonttype'] = 'none'
figidx += 1
i += 1
plt.show()
``` |
{
"source": "jkennedyvz/DeepFaceLive",
"score": 2
} |
#### File: DeepFaceLive/ui/QFaceAligner.py
```python
from localization import L
from xlib import qt as qtx
from ..backend import FaceAligner
from .widgets.QBackendPanel import QBackendPanel
from .widgets.QCheckBoxCSWFlag import QCheckBoxCSWFlag
from .widgets.QLabelPopupInfo import QLabelPopupInfo
from .widgets.QSpinBoxCSWNumber import QSpinBoxCSWNumber
from .widgets.QComboBoxCSWDynamicSingleSwitch import QComboBoxCSWDynamicSingleSwitch
class QFaceAligner(QBackendPanel):
def __init__(self, backend : FaceAligner):
cs = backend.get_control_sheet()
q_align_mode_label = QLabelPopupInfo(label=L('@QFaceAligner.align_mode'), popup_info_text=L('@QFaceAligner.help.align_mode'))
q_align_mode = QComboBoxCSWDynamicSingleSwitch(cs.align_mode, reflect_state_widgets=[q_align_mode_label])
q_face_coverage_label = QLabelPopupInfo(label=L('@QFaceAligner.face_coverage'), popup_info_text=L('@QFaceAligner.help.face_coverage') )
q_face_coverage = QSpinBoxCSWNumber(cs.face_coverage, reflect_state_widgets=[q_face_coverage_label])
q_resolution_label = QLabelPopupInfo(label=L('@QFaceAligner.resolution'), popup_info_text=L('@QFaceAligner.help.resolution') )
q_resolution = QSpinBoxCSWNumber(cs.resolution, reflect_state_widgets=[q_resolution_label])
q_exclude_moving_parts_label = QLabelPopupInfo(label=L('@QFaceAligner.exclude_moving_parts'), popup_info_text=L('@QFaceAligner.help.exclude_moving_parts') )
q_exclude_moving_parts = QCheckBoxCSWFlag(cs.exclude_moving_parts, reflect_state_widgets=[q_exclude_moving_parts_label])
q_head_mode_label = QLabelPopupInfo(label=L('@QFaceAligner.head_mode'), popup_info_text=L('@QFaceAligner.help.head_mode') )
q_head_mode = QCheckBoxCSWFlag(cs.head_mode, reflect_state_widgets=[q_head_mode_label])
q_freeze_z_rotation_label = QLabelPopupInfo(label=L('@QFaceAligner.freeze_z_rotation') )
q_freeze_z_rotation = QCheckBoxCSWFlag(cs.freeze_z_rotation, reflect_state_widgets=[q_freeze_z_rotation_label])
q_x_offset_label = QLabelPopupInfo(label=L('@QFaceAligner.x_offset'))
q_x_offset = QSpinBoxCSWNumber(cs.x_offset, reflect_state_widgets=[q_x_offset_label])
q_y_offset_label = QLabelPopupInfo(label=L('@QFaceAligner.y_offset'))
q_y_offset = QSpinBoxCSWNumber(cs.y_offset, reflect_state_widgets=[q_y_offset_label])
grid_l = qtx.QXGridLayout(spacing=5)
row = 0
grid_l.addWidget(q_align_mode_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
grid_l.addWidget(q_align_mode, row, 1, alignment=qtx.AlignLeft )
row += 1
grid_l.addWidget(q_face_coverage_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
grid_l.addWidget(q_face_coverage, row, 1, alignment=qtx.AlignLeft )
row += 1
grid_l.addWidget(q_resolution_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
grid_l.addWidget(q_resolution, row, 1, alignment=qtx.AlignLeft )
row += 1
grid_l.addWidget(q_exclude_moving_parts_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
grid_l.addWidget(q_exclude_moving_parts, row, 1, alignment=qtx.AlignLeft )
row += 1
grid_l.addWidget(q_head_mode_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
grid_l.addWidget(q_head_mode, row, 1, alignment=qtx.AlignLeft )
row += 1
grid_l.addWidget(q_freeze_z_rotation_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
grid_l.addWidget(q_freeze_z_rotation, row, 1, alignment=qtx.AlignLeft )
row += 1
grid_l.addLayout( qtx.QXVBoxLayout([q_x_offset_label, q_y_offset_label]), row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
grid_l.addLayout( qtx.QXHBoxLayout([q_x_offset, q_y_offset]), row, 1, alignment=qtx.AlignLeft )
row += 1
super().__init__(backend, L('@QFaceAligner.module_title'),
layout=qtx.QXVBoxLayout([grid_l]))
```
#### File: DeepFaceLive/ui/QFaceAnimator.py
```python
from pathlib import Path
from localization import L
from resources.gfx import QXImageDB
from xlib import qt as qtx
from ..backend import FaceAnimator
from .widgets.QBackendPanel import QBackendPanel
from .widgets.QCheckBoxCSWFlag import QCheckBoxCSWFlag
from .widgets.QComboBoxCSWDynamicSingleSwitch import \
QComboBoxCSWDynamicSingleSwitch
from .widgets.QLabelPopupInfo import QLabelPopupInfo
from .widgets.QSpinBoxCSWNumber import QSpinBoxCSWNumber
from .widgets.QXPushButtonCSWSignal import QXPushButtonCSWSignal
from .widgets.QSliderCSWNumber import QSliderCSWNumber
class QFaceAnimator(QBackendPanel):
def __init__(self, backend : FaceAnimator, animatables_path : Path):
self._animatables_path = animatables_path
cs = backend.get_control_sheet()
btn_open_folder = self.btn_open_folder = qtx.QXPushButton(image = QXImageDB.eye_outline('light gray'), tooltip_text='Reveal in Explorer', released=self._btn_open_folder_released, fixed_size=(24,22) )
q_device_label = QLabelPopupInfo(label=L('@common.device'), popup_info_text=L('@common.help.device') )
q_device = QComboBoxCSWDynamicSingleSwitch(cs.device, reflect_state_widgets=[q_device_label])
q_animatable_label = QLabelPopupInfo(label=L('@QFaceAnimator.animatable') )
q_animatable = QComboBoxCSWDynamicSingleSwitch(cs.animatable, reflect_state_widgets=[q_animatable_label, btn_open_folder])
q_animator_face_id_label = QLabelPopupInfo(label=L('@QFaceAnimator.animator_face_id') )
q_animator_face_id = QSpinBoxCSWNumber(cs.animator_face_id, reflect_state_widgets=[q_animator_face_id_label])
q_relative_mode_label = QLabelPopupInfo(label=L('@QFaceAnimator.relative_mode') )
q_relative_mode = QCheckBoxCSWFlag(cs.relative_mode, reflect_state_widgets=[q_relative_mode_label])
q_relative_power = QSliderCSWNumber(cs.relative_power)
q_update_animatables = QXPushButtonCSWSignal(cs.update_animatables, image=QXImageDB.reload_outline('light gray'), button_size=(24,22) )
q_reset_reference_pose = QXPushButtonCSWSignal(cs.reset_reference_pose, text=L('@QFaceAnimator.reset_reference_pose') )
grid_l = qtx.QXGridLayout( spacing=5)
row = 0
grid_l.addWidget(q_device_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
grid_l.addWidget(q_device, row, 1, alignment=qtx.AlignLeft )
row += 1
grid_l.addWidget(q_animatable_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
grid_l.addLayout(qtx.QXHBoxLayout([q_animatable, 2, btn_open_folder, 2, q_update_animatables]), row, 1 )
row += 1
grid_l.addWidget(q_animator_face_id_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
grid_l.addWidget(q_animator_face_id, row, 1, alignment=qtx.AlignLeft )
row += 1
grid_l.addWidget(q_relative_mode_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
grid_l.addLayout(qtx.QXHBoxLayout([q_relative_mode,2,q_relative_power]), row, 1, alignment=qtx.AlignLeft )
row += 1
grid_l.addWidget(q_reset_reference_pose, row, 0, 1, 2 )
row += 1
super().__init__(backend, L('@QFaceAnimator.module_title'),
layout=qtx.QXVBoxLayout([grid_l]) )
def _btn_open_folder_released(self):
qtx.QDesktopServices.openUrl(qtx.QUrl.fromLocalFile( str(self._animatables_path) ))
```
#### File: ui/widgets/QBCFaceAlignViewer.py
```python
import numpy as np
from localization import L
from resources.fonts import QXFontDB
from xlib import qt as qtx
from ... import backend
class QBCFaceAlignViewer(qtx.QXCollapsibleSection):
def __init__(self, backed_weak_heap : backend.BackendWeakHeap,
bc : backend.BackendConnection,
preview_width=256,):
self._preview_width = preview_width
self._timer = qtx.QXTimer(interval=16, timeout=self._on_timer_16ms, start=True)
self._backed_weak_heap = backed_weak_heap
self._bc = bc
self._bcd_id = None
layered_images = self._layered_images = qtx.QXFixedLayeredImages(preview_width, preview_width)
info_label = self._info_label = qtx.QXLabel( font=QXFontDB.get_fixedwidth_font(size=7))
super().__init__(title=L('@QBCFaceAlignViewer.title'),
content_layout=qtx.QXVBoxLayout([(layered_images, qtx.AlignCenter),
(info_label, qtx.AlignCenter)]) )
def _on_timer_16ms(self):
top_qx = self.get_top_QXWindow()
if not self.is_opened() or (top_qx is not None and top_qx.is_minimized() ):
return
bcd_id = self._bc.get_write_id()
if self._bcd_id != bcd_id:
# Has new bcd version
bcd, self._bcd_id = self._bc.get_by_id(bcd_id), bcd_id
if bcd is not None:
bcd.assign_weak_heap(self._backed_weak_heap)
self._layered_images.clear_images()
for fsi in bcd.get_face_swap_info_list():
face_image = bcd.get_image (fsi.face_align_image_name)
if face_image is not None:
h,w = face_image.shape[:2]
self._layered_images.add_image(face_image)
if fsi.face_align_ulmrks is not None:
lmrks_layer = np.zeros( (self._preview_width, self._preview_width, 4), dtype=np.uint8)
fsi.face_align_ulmrks.draw(lmrks_layer, (0,255,0,255))
if fsi.face_urect is not None and fsi.image_to_align_uni_mat is not None:
aligned_uni_rect = fsi.face_urect.transform(fsi.image_to_align_uni_mat)
aligned_uni_rect.draw(lmrks_layer, (0,0,255,255) )
self._layered_images.add_image(lmrks_layer)
self._info_label.setText(f'{w}x{h}')
return
def clear(self):
self._layered_images.clear_images()
```
#### File: ui/widgets/QCheckBoxCSWFlag.py
```python
from xlib import qt as qtx
from xlib.mp import csw as lib_csw
from .QCSWControl import QCSWControl
class QCheckBoxCSWFlag(QCSWControl):
"""
Implements lib_csw.Flag control as CheckBox
"""
def __init__(self, csw_flag : lib_csw.Flag.Client, reflect_state_widgets=None):
if not isinstance(csw_flag, lib_csw.Flag.Client):
raise ValueError('csw_flag must be an instance of Flag.Client')
self._csw_flag = csw_flag
csw_flag.call_on_flag(self.on_csw_flag)
chbox = self._chbox = qtx.QXCheckBox(clicked=self.on_chbox_clicked)
super().__init__(csw_control=csw_flag, reflect_state_widgets=reflect_state_widgets,
layout=qtx.QXHBoxLayout([chbox]))
def on_csw_flag(self, flag):
with qtx.BlockSignals(self._chbox):
self._chbox.setChecked(flag)
def on_chbox_clicked(self):
self._csw_flag.set_flag(self._chbox.isChecked())
```
#### File: ui/widgets/QLabelPopupInfo.py
```python
from typing import Union
from resources.fonts import QXFontDB
from resources.gfx import QXImageDB
from xlib import qt as qtx
class QLabelPopupInfo(qtx.QXWidget):
def __init__(self, label : str = None, popup_info_text = None):
"""
text label with optional popup info on click
"""
super().__init__()
self._has_info_text = False
self._label = qtx.QXLabel(text='', hided=True)
wnd_text_label = self._popup_wnd_text_label = qtx.QXLabel(text='', font=QXFontDB.get_default_font() )
wnd = self._popup_wnd = qtx.QXPopupWindow(layout=qtx.QXHBoxLayout([
qtx.QXFrame(bg_color= qtx.Qt.GlobalColor.black,
layout=qtx.QXHBoxLayout([
qtx.QXFrame(layout=qtx.QXHBoxLayout([qtx.QXLabel(image=QXImageDB.information_circle_outline('yellow'), scaled_contents=True, fixed_size=(24,24)),
wnd_text_label
], contents_margins=2, spacing=2)),
], contents_margins=2, spacing=2), size_policy=('fixed', 'fixed') )
], contents_margins=0) )
info_btn = self._info_btn = qtx.QXPushButton(image=QXImageDB.information_circle_outline('light gray'), released=self._on_info_btn_released, fixed_size=(24,22), hided=True)
self.setLayout(qtx.QXHBoxLayout([self._label, info_btn]))
self.set_label( label )
self.set_popup_info( popup_info_text )
def set_info_icon(self):
self._label.hide()
self._info_btn.show()
def set_label(self, label : Union[str, None]):
self._info_btn.hide()
self._label.setText(label)
self._label.show()
def set_popup_info(self, text : Union[str, None]):
if text is not None:
self._has_info_text = True
self._popup_wnd_text_label.setText(text)
else:
self._has_info_text = False
def enterEvent(self, ev):
super().enterEvent(ev)
if self.isEnabled() and self._has_info_text:
self._label.set_color('yellow')
def leaveEvent(self, ev):
super().leaveEvent(ev)
if self.isEnabled() and self._has_info_text:
self._label.set_color(None)
def mousePressEvent(self, ev):
super().mousePressEvent(ev)
self._show_popup_wnd()
def _on_info_btn_released(self):
self._show_popup_wnd()
def _show_popup_wnd(self):
if self._has_info_text:
popup_wnd = self._popup_wnd
popup_wnd.show()
label_widget = self._label
if label_widget.isHidden():
label_widget = self._info_btn
screen_size = qtx.QXMainApplication.inst.primaryScreen().size()
label_size = label_widget.size()
global_pt = label_widget.mapToGlobal( qtx.QPoint(0, label_size.height()))
popup_wnd_size = popup_wnd.size()
global_pt = qtx.QPoint( min(global_pt.x(), screen_size.width() - popup_wnd_size.width()),
min(global_pt.y(), screen_size.height() - popup_wnd_size.height()) )
popup_wnd.move(global_pt)
```
#### File: ui/widgets/QLineEditCSWText.py
```python
from pathlib import Path
from resources.fonts import QXFontDB
from resources.gfx import QXImageDB
from xlib import qt as qtx
from xlib.mp import csw as lib_csw
from .QCSWControl import QCSWControl
class QLineEditCSWText(QCSWControl):
def __init__(self, csw_text : lib_csw.Text.Client,
font = None,
reflect_state_widgets=None):
"""
Implements lib_csw.Text control as LineEdit
"""
if not isinstance(csw_text, lib_csw.Text.Client):
            raise ValueError('csw_text must be an instance of Text.Client')
self._csw_text = csw_text
self._dlg = None
csw_text.call_on_text(self._on_csw_text)
if font is None:
font = QXFontDB.get_default_font()
lineedit = self._lineedit = qtx.QXLineEdit(font=font,
placeholder_text='...',
size_policy=('expanding', 'fixed'),
editingFinished=self.on_lineedit_editingFinished)
super().__init__(csw_control=csw_text, reflect_state_widgets=reflect_state_widgets,
layout=qtx.QXHBoxLayout([lineedit]) )
def _on_csw_text(self, text):
with qtx.BlockSignals(self._lineedit):
self._lineedit.setText(text)
def on_lineedit_editingFinished(self):
text = self._lineedit.text()
if len(text) == 0:
text = None
self._csw_text.set_text(text)
```
#### File: torch/FaceAligner/FaceAligner.py
```python
from functools import partial
from pathlib import Path
from typing import Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from xlib.file import SplittedFile
from xlib.torch import TorchDeviceInfo, get_cpu_device_info
def _make_divisible(v: float, divisor: int, min_value = None) -> int:
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
class SqueezeExcitation(nn.Module):
def __init__( self, in_ch: int, squeeze_channels: int, activation = nn.ReLU, scale_activation = nn.Sigmoid):
super().__init__()
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(in_ch, squeeze_channels, 1)
self.fc2 = nn.Conv2d(squeeze_channels, in_ch, 1)
self.activation = activation()
self.scale_activation = scale_activation()
def forward(self, input):
scale = self.avgpool(input)
scale = self.fc1(scale)
scale = self.activation(scale)
scale = self.fc2(scale)
scale = self.scale_activation(scale)
return scale * input
class ConvNormActivation(nn.Sequential):
def __init__(self, in_ch: int, out_ch: int, kernel_size: int = 3, stride: int = 1, padding = None, groups: int = 1, norm_layer = nn.BatchNorm2d, activation_layer = nn.ReLU,) -> None:
if padding is None:
padding = (kernel_size - 1) // 2
layers = [torch.nn.Conv2d(in_ch, out_ch, kernel_size, stride, padding, groups=groups, bias=norm_layer is None)]
if norm_layer is not None:
layers.append(norm_layer(out_ch))
if activation_layer is not None:
layers.append(activation_layer())
super().__init__(*layers)
self.out_ch = out_ch
class InvertedResidual(nn.Module):
def __init__(self, in_ch: int, mid_ch: int, out_ch: int, kernel: int, stride: int, use_se: bool,
hs_act : bool, width_mult: float = 1.0,
norm_layer = None,):
super().__init__()
mid_ch = _make_divisible(mid_ch * width_mult, 8)
out_ch = _make_divisible(out_ch * width_mult, 8)
self._is_res_connect = stride == 1 and in_ch == out_ch
activation_layer = nn.Hardswish if hs_act else nn.ReLU
layers = []
if mid_ch != in_ch:
layers.append(ConvNormActivation(in_ch, mid_ch, kernel_size=1, norm_layer=norm_layer, activation_layer=activation_layer))
layers.append(ConvNormActivation(mid_ch, mid_ch, kernel_size=kernel, stride=stride, groups=mid_ch, norm_layer=norm_layer, activation_layer=activation_layer))
if use_se:
layers.append( SqueezeExcitation(mid_ch, _make_divisible(mid_ch // 4, 8), scale_activation=nn.Hardsigmoid) )
layers.append(ConvNormActivation(mid_ch, out_ch, kernel_size=1, norm_layer=norm_layer, activation_layer=None))
self.block = nn.Sequential(*layers)
self.out_ch = out_ch
def forward(self, input):
result = self.block(input)
if self._is_res_connect:
result = result + input
return result
class FaceAlignerNet(nn.Module):
def __init__(self):
super().__init__()
norm_layer = partial(nn.BatchNorm2d, eps=0.001, momentum=0.01)
width_mult = 1.66
self.c0 = c0 = ConvNormActivation(3, _make_divisible(16 * width_mult, 8), kernel_size=3, stride=2, norm_layer=norm_layer, activation_layer=nn.Hardswish)
self.c1 = c1 = InvertedResidual ( c0.out_ch, 16, 16, 3, 1, use_se=False, hs_act=False, norm_layer=norm_layer, width_mult=width_mult)
self.c2 = c2 = InvertedResidual ( c1.out_ch, 64, 24, 3, 2, use_se=False, hs_act=False, norm_layer=norm_layer, width_mult=width_mult)
self.c3 = c3 = InvertedResidual ( c2.out_ch, 72, 24, 3, 1, use_se=False, hs_act=False, norm_layer=norm_layer, width_mult=width_mult)
self.c4 = c4 = InvertedResidual ( c3.out_ch, 72, 40, 5, 2, use_se=True, hs_act=False, norm_layer=norm_layer, width_mult=width_mult)
self.c5 = c5 = InvertedResidual ( c4.out_ch, 120, 40, 5, 1, use_se=True, hs_act=False, norm_layer=norm_layer, width_mult=width_mult)
self.c6 = c6 = InvertedResidual ( c5.out_ch, 120, 40, 5, 1, use_se=True, hs_act=False, norm_layer=norm_layer, width_mult=width_mult)
self.c7 = c7 = InvertedResidual ( c6.out_ch, 240, 80, 3, 2, use_se=False, hs_act=True, norm_layer=norm_layer, width_mult=width_mult)
self.c8 = c8 = InvertedResidual ( c7.out_ch, 200, 80, 3, 1, use_se=False, hs_act=True, norm_layer=norm_layer, width_mult=width_mult)
self.c9 = c9 = InvertedResidual ( c8.out_ch, 184, 80, 3, 1, use_se=False, hs_act=True, norm_layer=norm_layer, width_mult=width_mult)
self.c10 = c10 = InvertedResidual ( c9.out_ch, 184, 80, 3, 1, use_se=False, hs_act=True, norm_layer=norm_layer, width_mult=width_mult)
self.c11 = c11 = InvertedResidual ( c10.out_ch, 480, 112, 3, 1, use_se=True, hs_act=True, norm_layer=norm_layer, width_mult=width_mult)
self.c12 = c12 = InvertedResidual ( c11.out_ch, 672, 112, 3, 1, use_se=True, hs_act=True, norm_layer=norm_layer, width_mult=width_mult)
self.c13 = c13 = InvertedResidual ( c12.out_ch, 672, 160, 5, 2, use_se=True, hs_act=True, norm_layer=norm_layer, width_mult=width_mult)
self.c14 = c14 = InvertedResidual ( c13.out_ch, 960, 160, 5, 1, use_se=True, hs_act=True, norm_layer=norm_layer, width_mult=width_mult)
self.c15 = c15 = InvertedResidual ( c14.out_ch, 960, 160, 5, 1, use_se=True, hs_act=True, norm_layer=norm_layer, width_mult=width_mult)
self.c16 = c16 = ConvNormActivation(c15.out_ch, _make_divisible(6*160*width_mult, 8), kernel_size=1, norm_layer=norm_layer, activation_layer=nn.Hardswish)
self.fc1 = nn.Linear(c16.out_ch, _make_divisible(c16.out_ch*1.33, 8))
self.fc1_act = nn.Hardswish()
self.fc2 = nn.Linear(self.fc1.out_features, 4)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def forward(self, inp):
x = inp
x = self.c0(x)
x = self.c1(x)
x = self.c2(x)
x = self.c3(x)
x = self.c4(x)
x = self.c5(x)
x = self.c6(x)
x = self.c7(x)
x = self.c8(x)
x = self.c9(x)
x = self.c10(x)
x = self.c11(x)
x = self.c12(x)
x = self.c13(x)
x = self.c14(x)
x = self.c15(x)
x = self.c16(x)
x = x.mean((-2,-1))
x = self.fc1(x)
x = self.fc1_act(x)
x = self.fc2(x)
scale_t, angle_t, tx_t, ty_t = torch.split(x, 1, -1)
aff_t = torch.cat([torch.cos(angle_t)*scale_t, -torch.sin(angle_t)*scale_t, tx_t,
torch.sin(angle_t)*scale_t, torch.cos(angle_t)*scale_t, ty_t,
], dim=-1).view(-1,2,3)
return aff_t
```
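A quick sketch (untrained weights, dummy input, and assuming the module's xlib dependencies import cleanly) showing the shape contract of `FaceAlignerNet`: one 2x3 affine matrix per input image, assembled from the predicted scale/angle/translation; the 224x224 input size is an arbitrary choice for illustration.
```python
import torch

net = FaceAlignerNet().eval()                # random weights; output values are meaningless here
with torch.no_grad():
    aff = net(torch.zeros(1, 3, 224, 224))   # N x 3 x H x W input
print(aff.shape)                             # torch.Size([1, 2, 3])
```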
#### File: torch/S3FD/S3FD.py
```python
import operator
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from xlib import math as lib_math
from xlib.file import SplittedFile
from xlib.image import ImageProcessor
from xlib.torch import TorchDeviceInfo, get_cpu_device_info
class S3FD:
def __init__(self, device_info : TorchDeviceInfo = None ):
if device_info is None:
device_info = get_cpu_device_info()
self.device_info = device_info
path = Path(__file__).parent / 'S3FD.pth'
SplittedFile.merge(path, delete_parts=False)
net = self.net = S3FDNet()
net.load_state_dict( torch.load(str(path) ))
net.eval()
if not device_info.is_cpu():
net.cuda(device_info.get_index())
def extract(self, img : np.ndarray, fixed_window, min_face_size=40):
"""
"""
ip = ImageProcessor(img)
if fixed_window != 0:
fixed_window = max(64, max(1, fixed_window // 32) * 32 )
img_scale = ip.fit_in(fixed_window, fixed_window, pad_to_target=True, allow_upscale=False)
else:
ip.pad_to_next_divisor(64, 64)
img_scale = 1.0
img = ip.ch(3).as_float32().apply( lambda img: img - [104,117,123]).get_image('NCHW')
tensor = torch.from_numpy(img)
if not self.device_info.is_cpu():
tensor = tensor.cuda(self.device_info.get_index())
batches_bbox = [x.data.cpu().numpy() for x in self.net(tensor)]
faces_per_batch = []
for batch in range(img.shape[0]):
bbox = self.refine( [ x[batch] for x in batches_bbox ] )
faces = []
for l,t,r,b,c in bbox:
if img_scale != 1.0:
l,t,r,b = l/img_scale, t/img_scale, r/img_scale, b/img_scale
bt = b-t
if min(r-l,bt) < min_face_size:
continue
b += bt*0.1
faces.append ( (l,t,r,b) )
#sort by largest area first
faces = [ [(l,t,r,b), (r-l)*(b-t) ] for (l,t,r,b) in faces ]
faces = sorted(faces, key=operator.itemgetter(1), reverse=True )
faces = [ x[0] for x in faces]
faces_per_batch.append(faces)
return faces_per_batch
def refine(self, olist):
bboxlist = []
variances = [0.1, 0.2]
for i in range(len(olist) // 2):
ocls, oreg = olist[i * 2], olist[i * 2 + 1]
stride = 2**(i + 2) # 4,8,16,32,64,128
for hindex, windex in [*zip(*np.where(ocls[1, :, :] > 0.05))]:
axc, ayc = stride / 2 + windex * stride, stride / 2 + hindex * stride
score = ocls[1, hindex, windex]
loc = np.ascontiguousarray(oreg[:, hindex, windex]).reshape((1, 4))
priors = np.array([[axc, ayc, stride * 4, stride * 4]])
bbox = np.concatenate((priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * np.exp(loc[:, 2:] * variances[1])), 1)
bbox[:, :2] -= bbox[:, 2:] / 2
bbox[:, 2:] += bbox[:, :2]
x1, y1, x2, y2 = bbox[0]
bboxlist.append([x1, y1, x2, y2, score])
if len(bboxlist) != 0:
bboxlist = np.array(bboxlist)
bboxlist = bboxlist[ lib_math.nms(bboxlist[:,0], bboxlist[:,1], bboxlist[:,2], bboxlist[:,3], bboxlist[:,4], 0.3), : ]
bboxlist = [x for x in bboxlist if x[-1] >= 0.5]
return bboxlist
@staticmethod
def save_as_onnx(onnx_filepath):
s3fd = S3FD()
torch.onnx.export(s3fd.net,
torch.from_numpy( np.zeros( (1,3,640,640), dtype=np.float32)),
str(onnx_filepath),
verbose=True,
training=torch.onnx.TrainingMode.EVAL,
opset_version=9,
do_constant_folding=True,
input_names=['in'],
output_names=['cls1', 'reg1', 'cls2', 'reg2', 'cls3', 'reg3', 'cls4', 'reg4', 'cls5', 'reg5', 'cls6', 'reg6'],
dynamic_axes={'in' : {0:'batch_size',2:'height',3:'width'},
'cls1' : {2:'height',3:'width'},
'reg1' : {2:'height',3:'width'},
'cls2' : {2:'height',3:'width'},
'reg2' : {2:'height',3:'width'},
'cls3' : {2:'height',3:'width'},
'reg3' : {2:'height',3:'width'},
'cls4' : {2:'height',3:'width'},
'reg4' : {2:'height',3:'width'},
'cls5' : {2:'height',3:'width'},
'reg5' : {2:'height',3:'width'},
'cls6' : {2:'height',3:'width'},
'reg6' : {2:'height',3:'width'},
},
)
class L2Norm(nn.Module):
def __init__(self, n_channels, scale=1.0):
super().__init__()
self.n_channels = n_channels
self.scale = scale
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.weight.data *= 0.0
self.weight.data += self.scale
def forward(self, x):
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
x = x / norm * self.weight.view(1, -1, 1, 1)
return x
class S3FDNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1)
self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.fc6 = nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=3)
self.fc7 = nn.Conv2d(1024, 1024, kernel_size=1, stride=1, padding=0)
self.conv6_1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
self.conv6_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1)
self.conv7_1 = nn.Conv2d(512, 128, kernel_size=1, stride=1, padding=0)
self.conv7_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)
self.conv3_3_norm = L2Norm(256, scale=10)
self.conv4_3_norm = L2Norm(512, scale=8)
self.conv5_3_norm = L2Norm(512, scale=5)
self.conv3_3_norm_mbox_conf = nn.Conv2d(256, 4, kernel_size=3, stride=1, padding=1)
self.conv3_3_norm_mbox_loc = nn.Conv2d(256, 4, kernel_size=3, stride=1, padding=1)
self.conv4_3_norm_mbox_conf = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding=1)
self.conv4_3_norm_mbox_loc = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1)
self.conv5_3_norm_mbox_conf = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding=1)
self.conv5_3_norm_mbox_loc = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1)
self.fc7_mbox_conf = nn.Conv2d(1024, 2, kernel_size=3, stride=1, padding=1)
self.fc7_mbox_loc = nn.Conv2d(1024, 4, kernel_size=3, stride=1, padding=1)
self.conv6_2_mbox_conf = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding=1)
self.conv6_2_mbox_loc = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1)
self.conv7_2_mbox_conf = nn.Conv2d(256, 2, kernel_size=3, stride=1, padding=1)
self.conv7_2_mbox_loc = nn.Conv2d(256, 4, kernel_size=3, stride=1, padding=1)
def forward(self, x):
h = F.relu(self.conv1_1(x))
h = F.relu(self.conv1_2(h))
h = F.max_pool2d(h, 2, 2)
h = F.relu(self.conv2_1(h))
h = F.relu(self.conv2_2(h))
h = F.max_pool2d(h, 2, 2)
h = F.relu(self.conv3_1(h))
h = F.relu(self.conv3_2(h))
h = F.relu(self.conv3_3(h))
f3_3 = h
h = F.max_pool2d(h, 2, 2)
h = F.relu(self.conv4_1(h))
h = F.relu(self.conv4_2(h))
h = F.relu(self.conv4_3(h))
f4_3 = h
h = F.max_pool2d(h, 2, 2)
h = F.relu(self.conv5_1(h))
h = F.relu(self.conv5_2(h))
h = F.relu(self.conv5_3(h))
f5_3 = h
h = F.max_pool2d(h, 2, 2)
h = F.relu(self.fc6(h))
h = F.relu(self.fc7(h))
ffc7 = h
h = F.relu(self.conv6_1(h))
h = F.relu(self.conv6_2(h))
f6_2 = h
h = F.relu(self.conv7_1(h))
h = F.relu(self.conv7_2(h))
f7_2 = h
f3_3 = self.conv3_3_norm(f3_3)
f4_3 = self.conv4_3_norm(f4_3)
f5_3 = self.conv5_3_norm(f5_3)
cls1 = self.conv3_3_norm_mbox_conf(f3_3)
reg1 = self.conv3_3_norm_mbox_loc(f3_3)
cls2 = self.conv4_3_norm_mbox_conf(f4_3)
reg2 = self.conv4_3_norm_mbox_loc(f4_3)
cls3 = self.conv5_3_norm_mbox_conf(f5_3)
reg3 = self.conv5_3_norm_mbox_loc(f5_3)
cls4 = self.fc7_mbox_conf(ffc7)
reg4 = self.fc7_mbox_loc(ffc7)
cls5 = self.conv6_2_mbox_conf(f6_2)
reg5 = self.conv6_2_mbox_loc(f6_2)
cls6 = self.conv7_2_mbox_conf(f7_2)
reg6 = self.conv7_2_mbox_loc(f7_2)
# max-out background label
chunk = torch.chunk(cls1, 4, 1)
bmax = torch.max(torch.max(chunk[0], chunk[1]), chunk[2])
cls1 = torch.cat ([bmax,chunk[3]], dim=1)
cls1, cls2, cls3, cls4, cls5, cls6 = [ F.softmax(x, dim=1) for x in [cls1, cls2, cls3, cls4, cls5, cls6] ]
return [cls1, reg1, cls2, reg2, cls3, reg3, cls4, reg4, cls5, reg5, cls6, reg6]
```
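Usage sketch only: it assumes the split `S3FD.pth` weight parts ship next to the module (as `SplittedFile.merge` expects) and that `some_photo.jpg` exists; `extract` returns, per batch item, a list of `(l, t, r, b)` boxes in original-image coordinates.
```python
import cv2

img = cv2.imread('some_photo.jpg')   # hypothetical input image
faces_per_batch = S3FD().extract(img, fixed_window=640, min_face_size=40)
for l, t, r, b in faces_per_batch[0]:
    cv2.rectangle(img, (int(l), int(t)), (int(r), int(b)), (0, 255, 0), 2)
```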
#### File: win32/oleaut32/oleaut32.py
```python
from ctypes import POINTER, Structure
from ..wintypes import VARIANT, dll_import
@dll_import('OleAut32')
def VariantInit( pvarg : POINTER(VARIANT) ) -> None: ...
```
#### File: avecl/_internal/AAxes.py
```python
from collections.abc import Iterable
class AAxes(Iterable):
__slots__ = ['axes','ndim','_inversed']
def __init__(self, axes, shape_ndim=None):
"""
Constructs AAxes from user argument
arguments
axes AAxes
Int
Iterable of ints
None
shape_ndim(None) provide shape_ndim if axes contain negative values
can raise an errors during the construction
AAxes supports:
A+B : concat A_axes with B_axes
A-B : removes B_axes from A_axes
"""
if isinstance(axes, AAxes):
self.axes = axes.axes
self.ndim = axes.ndim
self._inversed = axes._inversed
elif axes is None:
self.axes = None
self.ndim = None
self._inversed = None
else:
if not isinstance(axes, Iterable):
axes = (axes,)
if isinstance(axes, Iterable):
valid_axes = []
for x in axes:
if x is None:
                        raise ValueError(f'Incorrect value {x} in axes {axes}')
x = int(x)
if x < 0:
if shape_ndim is None:
                            raise ValueError(f'Incorrect value {x} in axes {axes}, or provide shape_ndim')
x = shape_ndim + x
if x in valid_axes:
raise ValueError(f'Axes must contain unique values.')
valid_axes.append(x)
self.axes = tuple(valid_axes)
self.ndim = len(self.axes)
self._inversed = None
def is_none_axes(self):
"""
returns True if AAxes is constructed with (None) argument, i.e. all-axes
"""
return self.axes is None
def sorted(self) -> 'AAxes':
"""
returns sorted AAxes
"""
return AAxes(sorted(self.axes))
def swapped_axes(self, axis_a, axis_b) -> 'AAxes':
x = list(self.axes)
if axis_a < 0:
axis_a = len(x) + axis_a
if axis_b < 0:
axis_b = len(x) + axis_b
x[axis_b], x[axis_a] = x[axis_a], x[axis_b]
return AAxes( tuple(x) )
def inversed(self) -> 'AAxes':
"""
Returns inversed axes order
Example:
for (0,2,3,1) returns (0,3,1,2)
"""
if self.is_none_axes():
raise Exception(f'none-axes does not support inversed(). Handle none-axes by calling .is_none_axes()')
if self._inversed is None:
x = { axis:i for i,axis in enumerate(self.axes) }
t = []
for i in range(self.ndim):
axis = x.get(i, None)
if axis is None:
                    raise Exception(f'axes {self.axes} are inconsistent and cannot be inverted.')
t.append(axis)
self._inversed = AAxes(t)
return self._inversed
def __hash__(self): return self.axes.__hash__()
def __eq__(self, other):
if isinstance(other, AAxes):
return self.axes == other.axes
elif isinstance(other, Iterable):
return self.axes == tuple(other)
return False
def __iter__(self):
if self.is_none_axes():
raise Exception(f'none-axes does not support iteration. Handle none-axes by calling .is_none_axes()')
return self.axes.__iter__()
def __len__(self): return self.ndim
def __getitem__(self,key):
if self.is_none_axes():
raise Exception(f'none-axes does not support indexing. Handle none-axes by calling .is_none_axes()')
elif isinstance(key, slice):
return AAxes(self.axes[key])
return self.axes[key]
def __radd__(self, o):
if isinstance(o, Iterable):
return AAxes( tuple(o) + self.axes)
else:
raise ValueError(f'unable to use type {o.__class__} in AAxes append')
def __add__(self, o):
if isinstance(o, Iterable):
return AAxes( self.axes + tuple(o) )
else:
raise ValueError(f'unable to use type {o.__class__} in AAxes append')
def __rsub__(self, o):
if isinstance(o, Iterable):
new_axes = []
for axis in o:
if axis not in self.axes:
new_axes.append(axis)
return AAxes(new_axes)
else:
            raise ValueError(f'unable to use type {o.__class__} in AAxes subtraction')
def __sub__(self, o):
if isinstance(o, Iterable):
new_axes = []
o_axes = tuple(o)
for axis in self.axes:
if axis not in o_axes:
new_axes.append(axis)
return AAxes(new_axes)
else:
            raise ValueError(f'unable to use type {o.__class__} in AAxes subtraction')
def __str__(self):
if self.is_none_axes():
return '(None)'
return str(self.axes)
def __repr__(self): return 'AAxes' + self.__str__()
__all__ = ['AAxes']
```
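A few illustrative operations on `AAxes`, matching the `A+B` / `A-B` semantics described in its docstring; the concrete axis tuples are arbitrary.
```python
a = AAxes((0, 2, 3, 1))
print(a + (4,))                  # (0, 2, 3, 1, 4)  -- concatenation
print(a - (2, 3))                # (0, 1)           -- removal
print(a.inversed())              # (0, 3, 1, 2)     -- inverse permutation
print(AAxes(-1, shape_ndim=4))   # (3,)             -- negative axis resolved against ndim
```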
#### File: _internal/backend/Kernel.py
```python
class Kernel:
"""
TensorCL kernel.
It does not allocate any resources, thus can be used as static variable within class.
arguments
kernel_text OpenCL text of kernel. Must contain only one __kernel
global_shape default global_shape for .run()
local_shape default local_shape for .run()
"""
def __init__(self, kernel_text, global_shape=None, local_shape=None):
self._kernel_text = kernel_text
self._global_shape = global_shape
self._local_shape = local_shape
def get_kernel_text(self) -> str: return self._kernel_text
def get_global_shape(self): return self._global_shape
def get_local_shape(self): return self._local_shape
def __str__(self): return f'Kernel: \n{self._kernel_text}'
def __repr__(self): return self.__str__()
```
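A minimal sketch of wrapping OpenCL source in a `Kernel`. The kernel text below is illustrative only; actually executing it requires a backend `Device` and its `run_kernel` call, which are assumed to exist elsewhere in the backend package.
```python
# Illustrative only: construction is cheap, execution needs a Device (assumed).
fill_krn = Kernel(global_shape=(1024,), kernel_text="""
__kernel void impl(__global float* O, float value)
{
    size_t gid = get_global_id(0);
    O[gid] = value;
}
""")
print(fill_krn.get_global_shape())  # (1024,)
```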
#### File: avecl/_internal/HArgs.py
```python
from typing import List
import numpy as np
from .backend import Device
from .HTensor import HTensor
from .HType import HType
from .Tensor import Tensor
from .AShape import AShape
class HArgs:
"""
Helper functions for list of arguments
"""
@staticmethod
def decompose(args):
"""
decompose list of args of Tensor and supported numeric values
returns ( shape_list, # if scalar value -> shape is None
dtype_list, #
kernel_args_list #
)
"""
shape_list = []
dtype_list = []
kernel_args_list = []
for arg in args:
if isinstance(arg, Tensor):
shape_list.append(arg.shape)
dtype_list.append(arg.dtype)
kernel_args_list.append(arg.get_buffer())
else:
if isinstance(arg, int):
dtype, arg = np.int32, np.int32(arg)
elif isinstance(arg, float):
dtype, arg = np.float32, np.float32(arg)
elif HType.is_obj_of_np_scalar_type(arg):
dtype = arg.__class__
else:
raise ValueError(f'Unsupported type of arg: {arg.__class__} Use Tensor or number type.')
shape_list.append(None)
dtype_list.append(dtype)
kernel_args_list.append(arg)
return tuple(shape_list), tuple(dtype_list), tuple(kernel_args_list)
@staticmethod
def get_shapes(args : List[Tensor]) -> List[AShape]:
"""
"""
return tuple(t.shape for t in args)
@staticmethod
def check_zero_get_length(args) -> int:
"""
raises an error if len(args) == 0, otherwise returns len
"""
args_len = len(args)
if len(args) == 0:
raise ValueError('args must be specified')
return args_len
@staticmethod
def check_get_same_device(args : List[Tensor]) -> Device:
"""
check all device of tensors are the same and return the device
"""
result = HTensor.all_same_device(args)
if not result:
raise ValueError('all Tensors must have the same device')
return args[0].get_device()
@staticmethod
def check_all_tensors(args : List[Tensor]):
"""
"""
if not all (isinstance(tensor, Tensor) for tensor in args):
raise ValueError('All values must have type of Tensor')
@staticmethod
def check_get_same_shape(args : List[Tensor]) -> AShape:
"""
check all shapes of tensors are the same and return the shape
"""
shape = args[0].shape
if not all (t.shape == shape for t in args):
raise ValueError('All tensors must have the same shape')
return shape
@staticmethod
def filter_tensor(args, raise_on_empty : bool):
"""
get only tensors from the list
"""
tensor_args = [arg for arg in args if isinstance(arg, Tensor) ]
if raise_on_empty and len(tensor_args) == 0:
raise ValueError('At least one arg must be a Tensor')
return tensor_args
__all__ = ['HArgs']
```
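A small sketch of `HArgs.decompose` with plain Python numbers, which needs no device; mixing in `Tensor` arguments works the same way but requires an initialized backend. The import path is assumed.
```python
import numpy as np
from xlib.avecl import HArgs  # assumed export path

shapes, dtypes, kernel_args = HArgs.decompose([3, 2.5])
# shapes      == (None, None)            -- scalars carry no shape
# dtypes      == (np.int32, np.float32)
# kernel_args == (np.int32(3), np.float32(2.5))
```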
#### File: _internal/info/SliceInfo.py
```python
import math
import numpy as np
from ..AShape import AShape
class SliceInfo:
__slots__ = ['o_shape', 'o_shape_kd', 'just_reshaped','axes_bes','axes_abs_bes']
def __init__(self, shape : AShape, slices):
"""
Slice info.
can raise ValueError,TypeError during the construction
arguments
slices
Example
o_shape result shape after slice
axes_bes list of (begin,step,end) per axis
if s==0, then single axis element is fetched from position b
s can be negative.
"""
# Validate slices argument for given shape.
new_slices = []
before_ellipsis = None
for s in slices:
if s is Ellipsis:
before_ellipsis = new_slices
new_slices = []
continue
elif s is not None and not isinstance(s, (int,tuple) ):
raise ValueError(f'unknown slice argument {s} of type {s.__class__}')
new_slices.append(s)
if before_ellipsis is not None:
# Process Ellipsis separator
            new_slices_n_axes = sum(1 for x in new_slices if x is not None)
            before_ellipsis_n_axes = sum(1 for x in before_ellipsis if x is not None)
# Expand slices by filling intermediate (None,None,None) for each remaining axis
new_slices = before_ellipsis + \
[(None,None,None)]*max(0, shape.ndim-before_ellipsis_n_axes-new_slices_n_axes) + \
new_slices
        new_slices_n_axes = sum(1 for x in new_slices if x is not None)
if new_slices_n_axes > shape.ndim:
            raise ValueError('more slice arguments than shape axes')
elif new_slices_n_axes < shape.ndim:
# Fill remaining axes
new_slices += [(None,None,None)]*( shape.ndim - new_slices_n_axes )
slices = tuple(new_slices)
# Compute shapes
output_is_reshaped = True # Flag determines that output_tensor
# can be just reshaped without any computation
o_shape = [] # output tensor shape
o_shape_kd = [] # output shape used in kernel, must match input shape
axes_bes = []
axes_abs_bes = []
i_axis = 0
# Process slices arguments
for v in slices:
if v is None:
# None is new axis
# We can add unlimited number of (1,) axes at any place of shape
o_shape.append(1)
continue
i_axis_size = shape[i_axis]
i_axis += 1
if isinstance(v, int):
if v < 0:
v += i_axis_size
if v < 0 or v >= i_axis_size:
                    raise ValueError(f'index {v} is out of bounds for axis {i_axis-1} with size {i_axis_size}')
b,e,s = v,v,0
else:
b,e,s = v
if s == 0:
raise ValueError(f'slice step cannot be zero')
# Fix begin, end, step values
if s is None:
s = 1
if b is None:
b = 0 if s >= 0 else i_axis_size-1
if e is None:
e = i_axis_size if s >= 0 else -1
elif e < 0:
e += i_axis_size
if b < 0:
b += i_axis_size
if s >= 0:
b = np.clip(b, 0, i_axis_size)
e = np.clip(e, 0, i_axis_size)
if b > e:
raise ValueError('for positive step, begin cannot be > end.')
abs_b, abs_e, abs_s = b,e,s
else:
b = np.clip(b, 0, i_axis_size-1)
e = np.clip(e, -1, i_axis_size)
if b <= e:
raise ValueError('for negative step, begin cannot be <= end.')
abs_s = -s
abs_e = b + 1
abs_b = b - (math.ceil( (b-e) / abs_s ) -1) * abs_s
# for every o_shape_kd axis
# we have exact begin,step values to fetch value from input
axes_bes.append( (b,e,s))
axes_abs_bes.append( (abs_b, abs_e, abs_s))
if i_axis_size != 1 and not (b == 0 and e == i_axis_size and s == 1):
# Such params of axis slice will change input, thus output cannot be as just reshaped input
output_is_reshaped = False
# Compute output_axis_size based on begin,end,step
o_axis_size = max(0, math.ceil ( (e-b) / (s if s != 0 else 1) ) )
if o_axis_size >= 1:
# >= 1 : select range of indexes, axis will remain
o_shape.append(o_axis_size)
            # ^ otherwise the axis will be suppressed
# o_shape with keepdims, must match ndim of input shape
o_shape_kd.append( max(1,o_axis_size) )
self.just_reshaped = output_is_reshaped
self.o_shape = AShape(o_shape)
self.o_shape_kd = AShape(o_shape_kd)
self.axes_bes = axes_bes
self.axes_abs_bes = axes_abs_bes
```
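A worked example of `SliceInfo` for a (4, 5) shape; the import paths are assumptions based on the file layout.
```python
from xlib.avecl import AShape                     # assumed export path
from xlib.avecl._internal.info import SliceInfo   # assumed import path

si = SliceInfo(AShape((4, 5)), ((1, 4, 1), 2))
# axis 0: (begin=1, end=4, step=1) keeps 3 elements
# axis 1: int index 2 becomes (begin=2, end=2, step=0) and the axis is suppressed
print(si.o_shape)        # (3,)
print(si.o_shape_kd)     # (3, 1)
print(si.axes_bes)       # [(1, 4, 1), (2, 2, 0)]
print(si.just_reshaped)  # False
```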
#### File: _internal/op/binary_erode_circle.py
```python
from ..AShape import AShape
from ..backend import Kernel
from ..HKernel import HKernel
from ..info import Conv2DInfo
from ..SCacheton import SCacheton
from ..Tensor import Tensor
def binary_erode_circle (input_t : Tensor, radius : int = 1, iterations : int = 1, dtype=None):
"""
Binary erode operator using circle kernel with radius.
input_t Tensor (...,H,W)
    per element of H,W: set 1 if all neighbor elements inside the circle of the given radius are != 0,
    otherwise set 0.
"""
op = SCacheton.get(_BinaryErodeOp, input_t.shape, input_t.dtype, int(radius), dtype)
device = input_t.get_device()
if radius <= 0 or iterations <= 0:
return input_t.copy()
else:
for i in range(iterations):
if i == 0:
buf_in = input_t
else:
buf_in, buf_out = buf_out, buf_in
if i <= 1:
buf_out = Tensor( op.o_shape, op.o_dtype, device=device )
device.run_kernel(op.forward_krn, buf_out.get_buffer(), buf_in.get_buffer() )
return buf_out
class _BinaryErodeOp():
def __init__(self, i_shape : AShape, i_dtype, radius, o_dtype):
self.o_dtype = o_dtype = o_dtype if o_dtype is not None else i_dtype
if i_shape.ndim < 2:
raise ValueError(f'i_shape.ndim must be >= 2')
KS = radius*2+1
IH,IW = i_shape[-2:]
ci = Conv2DInfo(IH, IW, KS, KS, stride=1, dilation=1, padding='same')
self.o_shape = o_shape = i_shape
self.forward_krn = Kernel(global_shape=(o_shape.size,), kernel_text=f"""
{HKernel.define_tensor('O', o_shape, o_dtype)}
{HKernel.define_tensor('I', i_shape, i_dtype)}
#define PADL {ci.PADL}
#define PADT {ci.PADT}
#define RADIUS {radius}
#define KS {KS}
__kernel void impl(__global O_PTR_TYPE* O_PTR_NAME, __global const I_PTR_TYPE* I_PTR_NAME)
{{
size_t gid = get_global_id(0);
{HKernel.decompose_idx_to_axes_idxs('gid', 'O', o_shape.ndim)}
{'#pragma unroll' if KS <= 16 else ''}
for (int kh=0; kh<KS; ++kh)
{'#pragma unroll' if KS <= 16 else ''}
for (int kw=0; kw<KS; ++kw)
{{
if ( hypot( (float)(kh-RADIUS), (float)(kw-RADIUS) ) <= RADIUS)
{{
int im2 = -PADT + kh + om2;
int im1 = -PADL + kw + om1;
I_TYPE i_val = (im1 >= 0 & im1 < Im1 & im2 >= 0 & im2 < Im2) ?
I_GLOBAL_LOAD(I_IDX_MOD({HKernel.axes_seq_enum('O', o_shape.ndim-2, suffix='im2,im1' )}))
: 0;
if (i_val == (I_TYPE)0)
{{
O_GLOBAL_STORE(gid, (O_TYPE) 0 );
return;
}}
}}
}}
O_GLOBAL_STORE(gid, (O_TYPE) 1 );
}}
""")
```
#### File: _internal/op/binary_morph.py
```python
from ..Tensor import Tensor
from .binary_dilate_circle import binary_dilate_circle
from .binary_erode_circle import binary_erode_circle
from .gaussian_blur import gaussian_blur
from .pad import pad
from .cast import cast
def binary_morph(input_t : Tensor, erode_dilate : int, blur : float, fade_to_border : bool = False, dtype=None) -> Tensor:
"""
Apply optional binary erode/dilate and optional blur.
input_t (...,H,W) tensor. Non zero values will be treated as 1.
    erode_dilate   int     > 0 erodes, < 0 dilates by the given number of pixels
blur float >= 0 amount of pixels to blur
fade_to_border(False) clip the image in order
to fade smoothly to the border with specified blur amount
"""
x = input_t
H,W = input_t.shape[-2:]
x = pad(x, (...,(H,H),(W,W)), mode='constant', constant_value=0)
if erode_dilate > 0:
x = binary_erode_circle(x, radius=1, iterations=max(1,erode_dilate//2))
elif erode_dilate < 0:
x = binary_dilate_circle(x, radius=1, iterations=max(1,-erode_dilate//2) )
if fade_to_border:
h_clip_size = H + blur // 2
w_clip_size = W + blur // 2
x[...,:h_clip_size,:] = 0
x[...,-h_clip_size:,:] = 0
x[...,:,:w_clip_size] = 0
x[...,:,-w_clip_size:] = 0
if blur > 0:
x = gaussian_blur(x, blur * 0.250, dtype=dtype)
else:
x = cast(x, dtype=dtype)
return x[...,H:-H,W:-W]
```
#### File: _internal/op/remap_np_affine.py
```python
import numpy as np
from ..AShape import AShape
from ..backend import Kernel
from ..EInterpolation import EInterpolation
from ..HKernel import HKernel
from ..SCacheton import SCacheton
from ..Tensor import Tensor
def remap_np_affine (input_t : Tensor, affine_n : np.ndarray, interpolation : EInterpolation = None, inverse=False, output_size=None, post_op_text=None, dtype=None) -> Tensor:
"""
remap affine operator for all channels using single numpy affine mat
arguments
input_t Tensor (...,H,W)
affine_n np.array (2,3)
interpolation EInterpolation
post_op_text cl kernel
post operation with output float value named 'O'
example 'O = 2*O;'
output_size (w,h)
dtype
"""
if affine_n.shape != (2,3):
raise ValueError('affine_n.shape must be (2,3)')
op = SCacheton.get(_RemapAffineOp, input_t.shape, input_t.dtype, interpolation, output_size, post_op_text, dtype)
output_t = Tensor( op.o_shape, op.o_dtype, device=input_t.get_device() )
((a, b, c),
(d, e, f)) = affine_n
if not inverse:
        # invert the forward 2x3 matrix by default (the kernel maps output coords to input coords), matching cv2.warpAffine behaviour
D = a*e - b*d
D = 1.0 / D if D != 0.0 else 0.0
a, b, c, d, e, f = ( e*D, -b*D, (b*f-e*c)*D ,
-d*D, a*D, (d*c-a*f)*D )
input_t.get_device().run_kernel(op.forward_krn, output_t.get_buffer(), input_t.get_buffer(),
np.float32(a), np.float32(b), np.float32(c), np.float32(d), np.float32(e), np.float32(f) )
return output_t
class _RemapAffineOp():
def __init__(self, i_shape : AShape, i_dtype, interpolation, o_size, post_op_text, o_dtype):
if np.dtype(i_dtype).type == np.bool_:
raise ValueError('np.bool_ dtype of i_dtype is not supported.')
if i_shape.ndim < 2:
raise ValueError('i_shape.ndim must be >= 2 (...,H,W)')
if interpolation is None:
interpolation = EInterpolation.LINEAR
IH,IW = i_shape[-2:]
if o_size is not None:
OH,OW = o_size
else:
OH,OW = IH,IW
o_shape = AShape( (OH,OW) )
if i_shape.ndim > 2:
o_shape = i_shape[:-2] + o_shape
self.o_shape = o_shape
self.o_dtype = o_dtype = o_dtype if o_dtype is not None else i_dtype
if post_op_text is None:
post_op_text = ''
if interpolation == EInterpolation.LINEAR:
self.forward_krn = Kernel(global_shape=(o_shape.size,), kernel_text=f"""
{HKernel.define_tensor('O', o_shape, o_dtype)}
{HKernel.define_tensor('I', i_shape, i_dtype)}
__kernel void impl(__global O_PTR_TYPE* O_PTR_NAME, __global const I_PTR_TYPE* I_PTR_NAME,
float a, float b, float c,
float d, float e, float f)
{{
size_t gid = get_global_id(0);
{HKernel.decompose_idx_to_axes_idxs('gid', 'O', o_shape.ndim)}
float cx01 = om1*a + om2*b + c;
float cy01 = om1*d + om2*e + f;
float cx0f = floor(cx01); int cx0 = (int)cx0f;
float cy0f = floor(cy01); int cy0 = (int)cy0f;
float cx1f = cx0f+1; int cx1 = (int)cx1f;
float cy1f = cy0f+1; int cy1 = (int)cy1f;
float p00 = I_GLOBAL_LOAD(I_IDX_MOD({HKernel.axes_seq_enum('O', o_shape.ndim-2, suffix='cy0,cx0')}));
float p01 = I_GLOBAL_LOAD(I_IDX_MOD({HKernel.axes_seq_enum('O', o_shape.ndim-2, suffix='cy0,cx1')}));
float p10 = I_GLOBAL_LOAD(I_IDX_MOD({HKernel.axes_seq_enum('O', o_shape.ndim-2, suffix='cy1,cx0')}));
float p11 = I_GLOBAL_LOAD(I_IDX_MOD({HKernel.axes_seq_enum('O', o_shape.ndim-2, suffix='cy1,cx1')}));
p00 *= (cx1f - cx01)*(cy1f - cy01)*(cy0 >= 0 & cy0 < Im2 & cx0 >= 0 & cx0 < Im1);
p01 *= (cx01 - cx0f)*(cy1f - cy01)*(cy0 >= 0 & cy0 < Im2 & cx1 >= 0 & cx1 < Im1);
p10 *= (cx1f - cx01)*(cy01 - cy0f)*(cy1 >= 0 & cy1 < Im2 & cx0 >= 0 & cx0 < Im1);
p11 *= (cx01 - cx0f)*(cy01 - cy0f)*(cy1 >= 0 & cy1 < Im2 & cx1 >= 0 & cx1 < Im1);
float O = p00 + p01 + p10 + p11;
{post_op_text}
O_GLOBAL_STORE(gid, O);
}}
""")
elif interpolation == EInterpolation.CUBIC:
self.forward_krn = Kernel(global_shape=(o_shape.size,), kernel_text=f"""
{HKernel.define_tensor('O', o_shape, o_dtype)}
{HKernel.define_tensor('I', i_shape, i_dtype)}
float cubic(float p0, float p1, float p2, float p3, float x)
{{
float a0 = p1;
float a1 = p2 - p0;
float a2 = 2 * p0 - 5 * p1 + 4 * p2 - p3;
float a3 = 3 * (p1 - p2) + p3 - p0;
return a0 + 0.5 * x * (a1 + x * (a2 + x * a3));
}}
__kernel void impl(__global O_PTR_TYPE* O_PTR_NAME, __global const I_PTR_TYPE* I_PTR_NAME,
float a, float b, float c,
float d, float e, float f)
{{
size_t gid = get_global_id(0);
{HKernel.decompose_idx_to_axes_idxs('gid', 'O', o_shape.ndim)}
float cx01f = om1*a + om2*b + c;
float cy01f = om1*d + om2*e + f;
float cxf = floor(cx01f); int cx = (int)cxf;
float cyf = floor(cy01f); int cy = (int)cyf;
float dx = cx01f-cxf;
float dy = cy01f-cyf;
float row[4];
#pragma unroll
for (int y=cy-1, j=0; y<=cy+2; y++, j++)
{{
float col[4];
#pragma unroll
for (int x=cx-1, i=0; x<=cx+2; x++, i++)
{{
float sxy = I_GLOBAL_LOAD(I_IDX_MOD({HKernel.axes_seq_enum('O', o_shape.ndim-2, suffix='y,x')}));
col[i] = sxy*(y >= 0 & y < Im2 & x >= 0 & x < Im1);
}}
row[j] = cubic(col[0], col[1], col[2], col[3], dx);
}}
float O = cubic(row[0], row[1], row[2], row[3], dy);
{post_op_text}
O_GLOBAL_STORE(gid, O);
}}
""")
elif interpolation in [EInterpolation.LANCZOS3, EInterpolation.LANCZOS4]:
RAD = 3 if interpolation == EInterpolation.LANCZOS3 else 4
self.forward_krn = Kernel(global_shape=(o_shape.size,), kernel_text=f"""
{HKernel.define_tensor('O', o_shape, o_dtype)}
{HKernel.define_tensor('I', i_shape, i_dtype)}
__kernel void impl(__global O_PTR_TYPE* O_PTR_NAME, __global const I_PTR_TYPE* I_PTR_NAME,
float a, float b, float c,
float d, float e, float f)
{{
size_t gid = get_global_id(0);
{HKernel.decompose_idx_to_axes_idxs('gid', 'O', o_shape.ndim)}
float cx01f = om1*a + om2*b + c;
float cy01f = om1*d + om2*e + f;
float cxf = floor(cx01f); int cx = (int)cxf;
float cyf = floor(cy01f); int cy = (int)cyf;
#define RAD {RAD}
float Fy[2 * RAD];
float Fx[2 * RAD];
#pragma unroll
for (int y=cy-RAD+1, j=0; y<=cy+RAD; y++, j++)
{{
float dy = fabs(cy01f - y);
if (dy < 1e-4) Fy[j] = 1;
else if (dy > RAD) Fy[j] = 0;
else Fy[j] = ( RAD * sin(M_PI * dy) * sin(M_PI * dy / RAD) ) / ( (M_PI*M_PI)*dy*dy );
}}
#pragma unroll
for (int x=cx-RAD+1, i=0; x<=cx+RAD; x++, i++)
{{
float dx = fabs(cx01f - x);
if (dx < 1e-4) Fx[i] = 1;
else if (dx > RAD) Fx[i] = 0;
else Fx[i] = ( RAD * sin(M_PI * dx) * sin(M_PI * dx / RAD) ) / ( (M_PI*M_PI)*dx*dx );
}}
float FxFysum = 0;
float O = 0;
#pragma unroll
for (int y=cy-RAD+1, j=0; y<=cy+RAD; y++, j++)
#pragma unroll
for (int x=cx-RAD+1, i=0; x<=cx+RAD; x++, i++)
{{
float sxy = I_GLOBAL_LOAD(I_IDX_MOD({HKernel.axes_seq_enum('O', o_shape.ndim-2, suffix='y,x')}));
float Fxyv = Fx[i]*Fy[j];
FxFysum += Fxyv;
O += sxy*Fxyv*(y >= 0 & y < Im2 & x >= 0 & x < Im1);
}}
O = O / FxFysum;
{post_op_text}
O_GLOBAL_STORE(gid, O);
}}
""")
else:
raise ValueError(f'Unsupported interpolation type {interpolation}')
```
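A hedged usage sketch: `img_t` is assumed to be an existing 2D image `Tensor` on an initialized device, the import paths are assumptions, and the `post_op_text` clamp only illustrates the hook.
```python
import numpy as np
from xlib.avecl import EInterpolation  # assumed export path

# forward cv2-style 2x3 affine: translate by +10 px in x and +5 px in y (illustrative)
aff = np.array([[1, 0, 10],
                [0, 1,  5]], dtype=np.float32)

out_t = remap_np_affine(img_t, aff,
                        interpolation=EInterpolation.LINEAR,
                        output_size=(256, 256),
                        post_op_text='O = clamp(O, 0.0f, 1.0f);')
```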
#### File: _internal/op/slice_.py
```python
from typing import List
import numpy as np
from ..AShape import AShape
from ..AAxes import AAxes
from ..backend import Kernel
from ..HKernel import HKernel
from ..HType import HType
from ..info import SliceInfo
from ..SCacheton import SCacheton
from ..Tensor import Tensor
def split(input_t : Tensor, axis, keepdims=False) -> List[Tensor]:
"""
arguments
input_t Tensor
axis
"""
shape = input_t.shape
result = []
for i in range(shape[axis]):
slices = [slice(None, None, None)]*shape.ndim
slices[axis] = i if not keepdims else slice(i,i+1,1)
result.append( slice_(input_t, slices) )
return result
def slice_(input_t : Tensor, slices, dtype : np.dtype = None, output_t=None, is_add_to_output=False) -> Tensor:
"""
arguments:
input_t input tensor
slices argument received from class.__getitem__(slices)
output_t compute result to this Tensor.
Tensor may be with different shape, but should match total size.
gradfn will not be set.
is_add_to_output add result to output_t if output_t is set.
Remark.
Slicing logic is not the same as numpy:
    For example, a numpy [2:0:1] slice produces an empty array,
    whereas slice_() selects index 2, the same as val_t[2].
"""
op = SCacheton.get(_SliceOp, input_t.shape, input_t.dtype, dtype, HType.hashable_slices(slices), False if output_t is None else is_add_to_output )
o_shape = op.slice_info.o_shape
if output_t is None:
if op.slice_info.just_reshaped:
return input_t.reshape(o_shape)
else:
output_t = Tensor(o_shape, op.o_dtype, device=input_t.get_device())
elif output_t.shape.size != o_shape.size:
raise ValueError(f'output_t must have size {o_shape.size}')
input_t.get_device().run_kernel(op.forward_krn, output_t.get_buffer(), input_t.get_buffer() )
return output_t
class _SliceOp:
def __init__(self, i_shape : AShape, i_dtype : np.dtype, o_dtype : np.dtype, slices, is_add_to_output):
self.slice_info = slice_info = SliceInfo(i_shape, slices)
self.o_dtype = o_dtype = o_dtype if o_dtype is not None else i_dtype
self.forward_krn = Kernel(global_shape=(slice_info.o_shape_kd.size,), kernel_text=f"""
{HKernel.define_tensor('O', slice_info.o_shape_kd, o_dtype )}
{HKernel.define_tensor('I', i_shape, i_dtype )}
__kernel void impl(__global O_PTR_TYPE* O_PTR_NAME, __global const I_PTR_TYPE* I_PTR_NAME)
{{
size_t gid = get_global_id(0);
{HKernel.decompose_idx_to_axes_idxs('gid', 'o', slice_info.o_shape_kd.ndim)}
{chr(10).join( f'size_t i{i} = {b} + o{i} * {s}; ' for i, (b,e,s) in enumerate(slice_info.axes_bes) ) }
{'O_STORE_ADD' if is_add_to_output else 'O_GLOBAL_STORE'}(gid, I_GLOBAL_LOAD( I_IDX({HKernel.axes_seq_enum('i', i_shape.ndim)}) ) );
}}
""")
```
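A hedged usage sketch of the functions defined above, assuming a Tensor `t` of shape (4, 8) on an initialized device.
```python
rows = split(t, axis=0)                    # 4 Tensors of shape (8,)
rows_kd = split(t, axis=0, keepdims=True)  # 4 Tensors of shape (1, 8)
sub = slice_(t, (0, (2, 6, 1)))            # row 0, columns 2..5 -> shape (4,)
```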
#### File: _internal/op/stack.py
```python
import numpy as np
from typing import List
from ..AShape import AShape
from ..backend import Kernel
from ..HArgs import HArgs
from ..HKernel import HKernel
from ..HType import HType
from ..info import StackInfo
from ..SCacheton import SCacheton
from ..Tensor import Tensor
def stack(tensor_list : List[Tensor], axis, dtype=None, output_t=None, is_add_to_output=False):
"""
Stack operator.
arguments:
tensor_list List of Tensors
axis Int
output_t compute result to this Tensor.
Tensor may be with different shape, but should match total size.
gradfn will not be set.
is_add_to_output add result to output_t if output_t is set.
"""
HArgs.check_zero_get_length(tensor_list)
HArgs.check_all_tensors(tensor_list)
device = HArgs.check_get_same_device(tensor_list)
shape_list, dtype_list, _ = HArgs.decompose(tensor_list)
op = SCacheton.get(_StackOp, shape_list, dtype_list, int(axis), dtype, False if output_t is None else is_add_to_output)
if output_t is None:
output_t = Tensor (op.info.o_shape, op.o_dtype, device=device)
elif output_t.shape.size != op.info.o_shape.size:
raise ValueError(f'output_t must have size {op.info.o_shape.size}')
for i, krn in enumerate(op.forward_krns):
device.run_kernel(krn, output_t.get_buffer(), tensor_list[i].get_buffer(), np.int64(i) )
return output_t
class _StackOp:
def __init__(self, shape_list : List[AShape], dtype_list : List[np.dtype], axis, o_dtype, is_add_to_output):
self.stack_count = stack_count = len(shape_list)
i_shape = shape_list[0]
if not all (s == i_shape for s in shape_list):
raise ValueError('All shapes must be the same')
self.o_dtype = o_dtype = o_dtype if o_dtype is not None else HType.get_most_weighted_dtype (dtype_list)
self.info = info = StackInfo(i_shape, axis, stack_count)
self.forward_krns = forward_krns = []
for i_dtype in dtype_list:
forward_krns.append( Kernel(global_shape=(i_shape.size,), kernel_text=f"""
{HKernel.define_tensor('O', info.o_shape, o_dtype )}
{HKernel.define_tensor('I', i_shape, i_dtype )}
__kernel void impl(__global O_PTR_TYPE* O_PTR_NAME, __global const I_PTR_TYPE* I_PTR_NAME, long i_new_idx)
{{
size_t gid = get_global_id(0);
{HKernel.decompose_idx_to_axes_idxs('gid', 'I', i_shape.ndim)}
{'O_STORE_ADD' if is_add_to_output else 'O_GLOBAL_STORE'}( O_IDX({HKernel.axes_seq_enum('I', i_shape.ndim, new_axis=('i_new_idx', info.axis))}), I_GLOBAL_LOAD(gid) );
}}
"""))
```
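A hedged usage sketch assuming Tensors `a` and `b` of identical shape (3, 4) on the same device, and assuming `StackInfo` inserts the new axis at the given position as numpy.stack does.
```python
s0 = stack([a, b], axis=0)  # expected shape (2, 3, 4)
s1 = stack([a, b], axis=1)  # expected shape (3, 2, 4)
```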
#### File: _internal/op/tile.py
```python
import numpy as np
from typing import List
from ..AShape import AShape
from ..backend import Kernel
from ..HKernel import HKernel
from ..info import TileInfo
from ..SCacheton import SCacheton
from ..Tensor import Tensor
def tile(input_t : Tensor, tiles : List[int], dtype : np.dtype = None, output_t=None, is_add_to_output=False):
"""
Tile operator
arguments
tiles Iterable of ints
dtype
output_t compute result to this Tensor.
Tensor may be with different shape, but should match total size.
gradfn will not be set.
is_add_to_output add result to output_t if output_t is set.
"""
op = SCacheton.get(_TileOp, input_t.shape, input_t.dtype, tuple(int(tile) for tile in tiles), dtype, False if output_t is None else is_add_to_output)
if output_t is None:
output_t = Tensor (op.info.o_shape, op.o_dtype, device=input_t.get_device())
elif output_t.shape.size != op.info.o_shape.size:
raise ValueError(f'output_t must have size {op.info.o_shape.size}')
input_t.get_device().run_kernel( op.forward_krn, output_t.get_buffer(), input_t.get_buffer())
return output_t
class _TileOp:
def __init__(self, i_shape : AShape, i_dtype, tiles, o_dtype, is_add_to_output):
self.o_dtype = o_dtype = o_dtype if o_dtype is not None else i_dtype
self.info = info = TileInfo(i_shape, tiles)
self.forward_krn = Kernel(global_shape=(info.o_shape.size,), kernel_text=f"""
{HKernel.define_tensor('I', i_shape, i_dtype)}
{HKernel.define_tensor('O', info.o_shape, o_dtype)}
__kernel void impl(__global O_PTR_TYPE* O_PTR_NAME, __global const I_PTR_TYPE* I_PTR_NAME)
{{
size_t gid = get_global_id(0);
{HKernel.decompose_idx_to_axes_idxs ('gid', 'O', info.o_shape.ndim)}
{'O_STORE_ADD' if is_add_to_output else 'O_GLOBAL_STORE'} (gid, I_GLOBAL_LOAD(I_IDX_MOD({HKernel.axes_seq_enum('O', info.o_shape.ndim)})) );
}}
""")
```
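A hedged usage sketch assuming a Tensor `t` of shape (2, 3) and numpy-style tiling semantics in `TileInfo`.
```python
t2 = tile(t, (2, 1))  # expected shape (4, 3): repeated along axis 0
t3 = tile(t, (1, 3))  # expected shape (2, 9): repeated along axis 1
```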
#### File: _internal/op/transpose.py
```python
import numpy as np
from ..AAxes import AAxes
from ..AShape import AShape
from ..backend import Kernel
from ..HKernel import HKernel
from ..info import TransposeInfo
from ..SCacheton import SCacheton
from ..Tensor import Tensor
def transpose(input_t : Tensor, axes_order, op_text=None, dtype : np.dtype = None, output_t : Tensor=None, is_add_to_output=False) -> Tensor:
"""
arguments:
axes_order Int
Iterable of ints
None
dtype cast to dtype
op_text(None) optional op with value during transpose.
'O = I'
output_t compute result to this Tensor.
Tensor may be with different shape, but should match total size
"""
op = SCacheton.get(_TransposeOp, input_t.shape, input_t.dtype, dtype, AAxes(axes_order), op_text, False if output_t is None else is_add_to_output )
if output_t is None:
output_t = Tensor (op.o_shape, op.o_dtype, device=input_t.get_device())
elif output_t.shape.size != op.o_shape.size:
raise ValueError(f'output_t must have size {op.o_shape.size}')
input_t.get_device().run_kernel(op.forward_krn, output_t.get_buffer(), input_t.get_buffer() )
return output_t
class _TransposeOp:
def __init__(self, i_shape : AShape, i_dtype : np.dtype, o_dtype : np.dtype, axes_order : AAxes, op_text, is_add_to_output : bool ):
self.axes_order = axes_order
self.o_shape = o_shape = TransposeInfo(i_shape, axes_order).o_shape
self.o_dtype = o_dtype = o_dtype if o_dtype is not None else i_dtype
if op_text is None:
op_text = 'O = I'
self.forward_krn = Kernel(global_shape=(i_shape.size,), kernel_text=f"""
{HKernel.define_tensor('O', o_shape, o_dtype)}
{HKernel.define_tensor('I', i_shape, i_dtype)}
__kernel void impl(__global O_PTR_TYPE* O_PTR_NAME, __global const I_PTR_TYPE* I_PTR_NAME)
{{
size_t gid = get_global_id(0);
{HKernel.decompose_idx_to_axes_idxs('gid', 'i', i_shape.ndim)}
I_TYPE I = I_GLOBAL_LOAD(gid);
O_TYPE O;
{op_text};
{'O_STORE_ADD' if is_add_to_output else 'O_GLOBAL_STORE'}( O_IDX({HKernel.axes_order_enum('I', axes_order )}), O );
}}""")
```
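A hedged usage sketch assuming a Tensor `t` of shape (1, 3, 64, 64); the `op_text` fragment is illustrative OpenCL and the import path is assumed.
```python
from xlib.avecl import AAxes  # assumed export path

nhwc = transpose(t, (0, 2, 3, 1))                          # (1, 64, 64, 3), NCHW -> NHWC
nchw = transpose(nhwc, AAxes((0, 2, 3, 1)).inversed())     # back to (1, 3, 64, 64)
doubled = transpose(t, (0, 2, 3, 1), op_text='O = I * 2')  # transpose and scale in one pass
```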
#### File: _internal/op/warp_affine.py
```python
import numpy as np
from ..AShape import AShape
from ..initializer import InitCoords2DArange
from ..SCacheton import SCacheton
from ..Tensor import Tensor
from .matmul import matmul
from .remap import remap
def warp_affine (input_t : Tensor, affine_t : Tensor, output_size=None, dtype=None) -> Tensor:
"""
arguments
input_t Tensor(...,H,W)
affine_t Tensor(...,2,3)
affine matrix
example of identity affine matrix
[1,0,0],
[0,1,0]
...-head part of shapes will be broadcasted to each other
output_size(None)
tuple of 2 ints (HW)
of output size
if None , size will not be changed
"""
op = SCacheton.get(_WarpAffineOp, input_t.shape, input_t.dtype, affine_t.shape, affine_t.dtype, output_size)
affine_t = affine_t.transpose( op.affine_transpose_axes, dtype=np.float32 ).reshape( (-1,3,2) )
coords_t = Tensor(op.coords_shape, np.float32, device=input_t.get_device(), initializer=op.coords_init )
coords_t = coords_t.reshape(op.coords_reshape)
coords_t = matmul(coords_t, affine_t).reshape(op.coords_affined_shape)
output_t = remap(input_t, coords_t, dtype=dtype)
return output_t
class _WarpAffineOp():
def __init__(self, i_shape : AShape, i_dtype, a_shape : AShape, a_dtype, o_size):
if np.dtype(i_dtype).type == np.bool_:
raise ValueError('np.bool_ dtype of i_dtype is not supported.')
if np.dtype(a_dtype).type == np.bool_:
raise ValueError('np.bool_ dtype of a_dtype is not supported.')
if i_shape.ndim < 2:
raise ValueError('i_shape.ndim must be >= 2 (...,H,W)')
if a_shape.ndim < 2:
raise ValueError(f'a_shape.ndim must be >= 2 (...,2,3)')
if a_shape[-2] != 2 or a_shape[-1] != 3:
raise ValueError('Last a_shape dims must be == (...,2,3)')
IH,IW = i_shape[-2:]
if o_size is not None:
OH,OW = o_size
else:
OH,OW = IH,IW
self.coords_shape = AShape( (OH,OW,3) )
self.coords_affined_shape = AShape( (OH,OW,2) )
if a_shape.ndim > 2:
self.coords_shape = a_shape[:-2] + self.coords_shape
self.coords_affined_shape = a_shape[:-2] + self.coords_affined_shape
self.coords_init = InitCoords2DArange(0,OH-1,0,OW-1)
self.coords_reshape = (-1,OH*OW,3)
self.affine_transpose_axes = a_shape.axes_arange().swapped_axes(-2,-1)
```
#### File: xlib/face/FaceWarper.py
```python
from typing import Iterable, Tuple, Union
import cv2
import numpy as np
from ..math import Affine2DMat, Affine2DUniMat
class FaceWarper:
def __init__(self,
img_to_face_uni_mat : Affine2DUniMat,
align_rot_deg : Union[None, float, Tuple[float, float] ] = [-15,15],
align_scale : Union[None, float, Tuple[float, float] ] = [-0.15, 0.15],
align_tx : Union[None, float, Tuple[float, float] ] = [-0.05, 0.05],
align_ty : Union[None, float, Tuple[float, float] ] = [-0.05, 0.05],
rw_grid_cell_count : Union[None, int, Tuple[int, int] ] = [3,7],
rw_grid_rot_deg : Union[None, float, Tuple[float, float] ] = [-180,180],
rw_grid_scale : Union[None, float, Tuple[float, float] ] = [-0.25, 0.25],
rw_grid_tx : Union[None, float, Tuple[float, float] ] = [-0.25, 0.25],
rw_grid_ty : Union[None, float, Tuple[float, float] ] = [-0.25, 0.25],
rnd_state : np.random.RandomState = None,
):
"""
Max quality one-pass face augmentation via geometric transformations with provided random range or exact values.
img_to_face_uni_mat Affine2DUniMat
Affine2DUniMat given from FLandmarks2D.calc_cut
it is an uniform affineMat to transform original image to aligned face
align_* rw_grid_*
exact augmentation parameters or range for random generation.
"""
self._img_to_face_uni_mat = img_to_face_uni_mat
self._face_to_img_uni_mat = img_to_face_uni_mat.invert()
        if rnd_state is None:
            rnd_state = np.random.RandomState()
self._align_rot_deg = rnd_state.uniform(*align_rot_deg) if isinstance(align_rot_deg, Iterable) else align_rot_deg
self._align_scale = rnd_state.uniform(*align_scale) if isinstance(align_scale, Iterable) else align_scale
self._align_tx = rnd_state.uniform(*align_tx) if isinstance(align_tx, Iterable) else align_tx
self._align_ty = rnd_state.uniform(*align_ty) if isinstance(align_ty, Iterable) else align_ty
self._rw_grid_cell_count = rnd_state.randint(*rw_grid_cell_count) if isinstance(rw_grid_cell_count, Iterable) else rw_grid_cell_count
self._rw_grid_rot_deg = rnd_state.uniform(*rw_grid_rot_deg) if isinstance(rw_grid_rot_deg, Iterable) else rw_grid_rot_deg
self._rw_grid_scale = rnd_state.uniform(*rw_grid_scale) if isinstance(rw_grid_scale, Iterable) else rw_grid_scale
self._rw_grid_tx = rnd_state.uniform(*rw_grid_tx) if isinstance(rw_grid_tx, Iterable) else rw_grid_tx
self._rw_grid_ty = rnd_state.uniform(*rw_grid_ty) if isinstance(rw_grid_ty, Iterable) else rw_grid_ty
self._warp_rnd_mat = Affine2DUniMat.from_transformation(0.5, 0.5, self._rw_grid_rot_deg, 1.0+self._rw_grid_scale, self._rw_grid_tx, self._rw_grid_ty)
self._align_rnd_mat = Affine2DUniMat.from_transformation(0.5, 0.5, self._align_rot_deg, 1.0+self._align_scale, self._align_tx, self._align_ty)
self._rnd_state_state = rnd_state.get_state()
self._cached = {}
def get_aligned_random_transform_mat(self) -> Affine2DUniMat:
"""
returns Affine2DUniMat that represents transformation from aligned face to randomly transformed aligned face
"""
mat1 = self._img_to_face_uni_mat
mat2 = (self._face_to_img_uni_mat * self._align_rnd_mat).invert()
pts = [ [0,0], [1,0], [1,1]]
src_pts = mat1.transform_points(pts)
dst_pts = mat2.transform_points(pts)
return Affine2DUniMat.from_3_pairs(src_pts, dst_pts)
def transform(self, img : np.ndarray, out_res : int, random_warp : bool = True) -> np.ndarray:
"""
transform an image.
Subsequent calls will output the same result for any img shape and out_res.
img np.ndarray (HWC)
out_res int
random_warp(True) bool
"""
H,W = img.shape[:2]
key = (H,W,random_warp)
data = self._cached.get(key, None)
if data is None:
rnd_state = np.random.RandomState()
rnd_state.set_state( self._rnd_state_state )
self._cached[key] = data = self._gen(H,W, random_warp, out_res, rnd_state=rnd_state )
image_grid, face_mask = data
new_img = cv2.remap(img, image_grid, None, interpolation=cv2.INTER_LANCZOS4)
new_img *= face_mask
return new_img
def _gen(self, H, W, random_warp, out_res, rnd_state):
"""generate grid and mask"""
# make identity grid
image_grid = np.stack(np.meshgrid(np.linspace(0., 1.0, H, dtype=np.float32),
np.linspace(0., 1.0, W, dtype=np.float32)), -1)
if random_warp:
# make a random face_warp_grid in the space of the face
face_warp_grid = FaceWarper._gen_random_warp_uni_grid_diff(out_res, self._rw_grid_cell_count, 0.12, rnd_state)
# apply random transformation mat of face_warp_grid to mat that transforms face to image
face_warp_grid_uni_mat = self._face_to_img_uni_mat * self._warp_rnd_mat
# warp face_warp_grid to the space of image using previous mat and merge with image_grid
image_grid += cv2.warpAffine(face_warp_grid, face_warp_grid_uni_mat.to_exact_mat(out_res,out_res, W, H), (W,H), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
# scale uniform grid to image size
image_grid *= (H-1, W-1)
# apply random transformations for align mat
img_to_face_rnd_mat = (self._face_to_img_uni_mat * self._align_rnd_mat).invert().to_exact_mat(W,H,out_res,out_res)
# warp image_grid to face space
image_grid = cv2.warpAffine(image_grid, img_to_face_rnd_mat, (out_res,out_res), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE )
# make mask to refine image-boundary visible in face space
face_mask = cv2.warpAffine( np.ones( (H,W), dtype=np.uint8), img_to_face_rnd_mat, (out_res,out_res), flags=cv2.INTER_NEAREST)[...,None]
return image_grid, face_mask
    @staticmethod
    def _gen_random_warp_uni_grid_diff(size: int, cell_count, cell_mod, rnd_state) -> np.ndarray:
"""
generates square uniform random warp coordinate differences
grid of shape (size, size, 2) (x,y)
cell_count(3) 3+
cell_mod (0.12) [ 0 .. 0.24 ]
"""
cell_count = max(3, cell_count)
cell_mod = np.clip(cell_mod, 0, 0.24)
cell_size = 1.0 / (cell_count-1)
grid = np.zeros( (cell_count,cell_count, 2), dtype=np.float32 )
grid[1:-1,1:-1, 0:2] += rnd_state.uniform (low=-cell_size*cell_mod, high=cell_size*cell_mod, size=(cell_count-2, cell_count-2, 2) )
grid = cv2.resize(grid, (size, size), interpolation=cv2.INTER_CUBIC ).astype(np.float32)
        # Linearly damp the border cells to zero
border_size = size // cell_count
dumper = np.linspace(0, 1, border_size, dtype=np.float32)
grid[:border_size, :,:] *= dumper[:,None,None]
grid[-border_size:,:,:] *= dumper[::-1,None,None]
grid[:,:border_size ,:] *= dumper[None,:,None]
grid[:,-border_size:,:] *= dumper[None,::-1,None]
return grid
```
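A hedged usage sketch: `img_to_face_uni_mat` is assumed to come from `FLandmarks2D.calc_cut` as the constructor docstring states, and `img` is assumed to be an existing HWC float image.
```python
import numpy as np

warper = FaceWarper(img_to_face_uni_mat,
                    align_rot_deg=(-10, 10),
                    rw_grid_cell_count=(3, 5),
                    rnd_state=np.random.RandomState(42))
aug_face = warper.transform(img, out_res=256, random_warp=True)  # (256, 256, C)
rnd_mat = warper.get_aligned_random_transform_mat()
```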
#### File: xlib/face/UPerson.py
```python
import uuid
from typing import Union
from .IState import IState
class UPerson(IState):
def __init__(self):
"""
"""
self._uuid : Union[bytes, None] = None
self._name : Union[str, None] = None
self._age : Union[int, None] = None
def __str__(self): return f"UPerson UUID:[...{self._uuid[-4:].hex()}] name:[{self._name}] age:[{self._age}]"
def __repr__(self): return self.__str__()
@staticmethod
def from_state(state : dict) -> 'UPerson':
ufm = UPerson()
ufm.restore_state(state)
return ufm
def restore_state(self, state : dict):
self._uuid = state.get('_uuid', None)
self._name = state.get('_name', None)
self._age = state.get('_age', None)
def dump_state(self) -> dict:
return {'_uuid' : self._uuid,
'_name' : self._name,
'_age' : self._age,
}
def get_uuid(self) -> Union[bytes, None]:
if self._uuid is None:
self._uuid = uuid.uuid4().bytes
return self._uuid
def set_uuid(self, uuid : Union[bytes, None]):
if uuid is not None and not isinstance(uuid, bytes):
raise ValueError(f'uuid must be an instance of bytes or None')
self._uuid = uuid
def get_name(self) -> Union[str, None]: return self._name
def set_name(self, name : Union[str, None]):
if name is not None and not isinstance(name, str):
raise ValueError(f'name must be an instance of str or None')
self._name = name
def get_age(self) -> Union[str, None]: return self._age
def set_age(self, age : Union[int, None]):
if age is not None and not isinstance(age, int):
raise ValueError(f'age must be an instance of int or None')
self._age = age
```
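A state round-trip sketch for `UPerson`; the import path is an assumption.
```python
from xlib.face import UPerson  # assumed export path

p = UPerson()
p.set_name('John')
p.set_age(30)
uuid_bytes = p.get_uuid()       # lazily generated on first access

state = p.dump_state()          # plain dict of picklable values
p2 = UPerson.from_state(state)
assert p2.get_name() == 'John' and p2.get_age() == 30
```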
#### File: xlib/image/_misc.py
```python
import numpy as np
def get_NHWC_shape(img : np.ndarray):
"""
returns NHWC shape where missed dims are 1
"""
ndim = img.ndim
if ndim not in [2,3,4]:
raise ValueError(f'img.ndim must be 2,3,4, not {ndim}.')
if ndim == 2:
N, (H,W), C = 1, img.shape, 1
elif ndim == 3:
N, (H,W,C) = 1, img.shape
else:
N,H,W,C = img.shape
return N,H,W,C
```
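A runnable example showing how missing dims are reported as 1.
```python
import numpy as np

print(get_NHWC_shape(np.zeros((64, 48))))        # (1, 64, 48, 1)
print(get_NHWC_shape(np.zeros((64, 48, 3))))     # (1, 64, 48, 3)
print(get_NHWC_shape(np.zeros((8, 64, 48, 3))))  # (8, 64, 48, 3)
```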
#### File: xlib/logic/DelayedBuffers.py
```python
from collections import deque
from datetime import datetime
class DelayedBuffers:
"""
Buffers temporal data and "shows" it evenly with target delay.
first frame created
0 ms
| 1500ms first frame arrived, set minimum target delay
|
more data arrived
| || || | |
2000ms target delay (set by user)
|
^-----buffered data-----^
| | | | | | | | |
^----show data evenly---^
"""
class ProcessResult:
__slots__ = ['new_data']
def __init__(self):
self.new_data = None
def __init__(self):
self._buffers = deque()
self._target_delay = 0
self._last_ts = datetime.now().timestamp()
self._last_data = None
self._avg_delay = 1.0
def _update_avg_frame_delay(self):
buffers = self._buffers
if len(buffers) >= 2:
x = tuple(buffer[0] for buffer in buffers)
self._avg_delay = min(1.0, (max(x)-min(x)) / (len(x)-1) )
def get_avg_delay(self): return self._avg_delay
def add_buffer(self, timestamp : float, data):
buffers = self._buffers
buffers_len = len(buffers)
for i in range(buffers_len):
if timestamp < buffers[i][0]:
buffers.insert( i, (timestamp, data))
self._update_avg_frame_delay()
return
buffers.append( (timestamp, data) )
self._update_avg_frame_delay()
def set_target_delay(self, target_delay_sec : float):
self._target_delay = target_delay_sec
def process(self) -> 'DelayedBuffers.ProcessResult':
"""
processes inner logic
returns DelayedBuffers.ProcessResult()
"""
result = DelayedBuffers.ProcessResult()
buffers = self._buffers
now = datetime.now().timestamp()
if now - self._last_ts >= self._avg_delay:
self._last_ts += self._avg_delay
if len(buffers) != 0:
# Find nearest to target_delay
nearest_i = -1
nearest_diff = 999999
target_delay = self._target_delay
buffers_to_remove = []
for i, buffer in enumerate(buffers):
ts = buffer[0]
diff = abs(now - ts - target_delay)
if diff <= nearest_diff:
nearest_i = i
nearest_diff = diff
buffers_to_remove.append(buffer)
else:
break
if len(buffers_to_remove) >= 2:
buffers_to_remove.pop(-1)
for buffer in buffers_to_remove:
buffers.remove(buffer)
self._update_avg_frame_delay()
if len(buffers) != 0:
_, new_data = buffers[0]
if not self._last_data is new_data:
self._last_data = new_data
result.new_data = new_data
return result
```
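A usage sketch: a producer pushes timestamped payloads, a consumer polls `process()` frequently and handles `new_data` when it appears. The payload dict is a placeholder.
```python
import time

buffers = DelayedBuffers()
buffers.set_target_delay(2.0)                  # seconds

# producer side (e.g. a network thread) pushes timestamped payloads
buffers.add_buffer(time.time(), {'frame': 1})  # placeholder payload

# consumer side, called every few milliseconds
result = buffers.process()
if result.new_data is not None:
    print(result.new_data)
```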
#### File: mp/csw/Progress.py
```python
from typing import Union
from ...python import EventListener
from .CSWBase import ControlClient, ControlHost
class _ProgressBase:
def __init__(self):
self._progress = None
self._on_progress_evl = EventListener()
self._call_on_msg('progress', self._on_msg_progress)
def _on_msg_progress(self, progress):
self._set_progress(progress)
def _set_progress(self, progress, block_event=False):
if progress is not None:
progress = int(progress)
if self._progress != progress:
self._progress = progress
if not block_event:
self._on_progress_evl.call(progress if progress is not None else 0)
return True
return False
def call_on_progress(self, func_or_list):
"""Call when the progress is changed."""
self._on_progress_evl.add(func_or_list)
def get_progress(self): return self._progress
class Progress:
"""
Progress control with 0..100 int value
Values:
None : uninitialized state
int/float : value
"""
class Config:
def __init__(self, title=None):
self._title = title
def get_title(self) -> Union[str, None]:
return self._title
class Host(ControlHost, _ProgressBase):
def __init__(self):
ControlHost.__init__(self)
_ProgressBase.__init__(self)
self._config = Progress.Config()
def _send_progress(self):
self._send_msg('progress', self._progress)
def set_progress(self, progress, block_event=False):
"""
progress number 0..100
block_event(False) on_progress event will not be called on this side
"""
if self._set_progress(progress, block_event=block_event):
self._send_progress()
def set_config(self, config : 'Progress.Config'):
self._send_msg('config', config)
class Client(ControlClient, _ProgressBase):
def __init__(self):
ControlClient.__init__(self)
_ProgressBase.__init__(self)
self._on_config_evl = EventListener()
self._call_on_msg('config', self._on_msg_config)
def _on_reset(self):
self._set_progress(None)
def _on_msg_config(self, config):
self._on_config_evl.call(config)
def call_on_config(self, func):
self._on_config_evl.add(func)
```
#### File: qt/core/QXTimer.py
```python
from PySide6.QtCore import *
class QXTimer(QTimer):
def __init__(self, interval=None, timeout=None, single_shot=False, start=False):
super().__init__()
if interval is not None:
self.setInterval(interval)
if timeout is not None:
self.timeout.connect(timeout)
if single_shot:
self.setSingleShot(True)
if start:
self.start()
```
#### File: qt/core/widget.py
```python
from collections.abc import Iterable
from PySide6.QtCore import *
class BlockSignals:
def __init__(self, qt_widget_or_list, block_signals=True):
if not isinstance(qt_widget_or_list, (tuple,list)):
qt_widget_or_list = [qt_widget_or_list]
self.qt_widget_or_list = qt_widget_or_list
self.block_signals = block_signals
def __enter__(self):
if self.block_signals:
for qt_widget in self.qt_widget_or_list:
qt_widget.blockSignals(True)
return self
def __exit__(self, *_):
if self.block_signals:
for qt_widget in self.qt_widget_or_list:
qt_widget.blockSignals(False)
def enable(widget_or_list):
if not isinstance(widget_or_list, (tuple,list)):
widget_or_list = [widget_or_list]
for widget in widget_or_list:
if isinstance(widget, (tuple,list)):
enable(widget)
else:
widget.setEnabled(True)
def disable(widget_or_list):
if not isinstance(widget_or_list, (tuple,list)):
widget_or_list = [widget_or_list]
for widget in widget_or_list:
if isinstance(widget, (tuple,list)):
disable(widget)
else:
widget.setEnabled(False)
def hide(widget_or_list):
if not isinstance(widget_or_list, (tuple,list)):
widget_or_list = [widget_or_list]
for widget in widget_or_list:
if isinstance(widget, (tuple,list)):
hide(widget)
else:
widget.hide()
def show(widget_or_list):
if not isinstance(widget_or_list, (tuple,list)):
widget_or_list = [widget_or_list]
for widget in widget_or_list:
if isinstance(widget, (tuple,list)):
show(widget)
else:
widget.show()
def show_and_enable(widget_or_list):
if not isinstance(widget_or_list, (tuple,list)):
widget_or_list = [widget_or_list]
for widget in widget_or_list:
if isinstance(widget, (tuple,list)):
show_and_enable(widget)
else:
widget.show()
widget.setEnabled(True)
def hide_and_disable(widget_or_list):
if not isinstance(widget_or_list, (tuple,list)):
widget_or_list = [widget_or_list]
for widget in widget_or_list:
if isinstance(widget, (tuple,list)):
hide_and_disable(widget)
else:
widget.hide()
widget.setEnabled(False)
def set_contents_margins(obj, contents_margins):
if contents_margins is not None:
if isinstance(contents_margins, int):
contents_margins = (contents_margins,)*4
if isinstance(contents_margins, Iterable):
obj.setContentsMargins(*contents_margins)
```
#### File: qt/widgets/QXFrameVBox.py
```python
from PySide6.QtGui import *
from PySide6.QtWidgets import *
from .QXFrame import QXFrame
from .QXVBoxLayout import QXVBoxLayout
class QXFrameVBox(QXFrame):
def __init__(self, widgets=None, contents_margins=0, spacing=0, **kwargs):
super().__init__(layout=QXVBoxLayout(widgets=widgets, contents_margins=contents_margins, spacing=spacing), **kwargs)
```
#### File: qt/widgets/QXMainApplication.py
```python
from pathlib import Path
from PySide6.QtCore import *
from PySide6.QtGui import *
from PySide6.QtWidgets import *
from ..core.QXTimer import QXTimer
from ...db import KeyValueDB
from .forward_declarations import forward_declarations
class QXMainApplication(QApplication):
inst : 'QXMainApplication' = None
@staticmethod
def get_singleton() -> 'QXMainApplication':
if QXMainApplication.inst is None:
raise Exception('QXMainApplication must be instantiated')
return QXMainApplication.inst
def __init__(self, app_name=None, settings_dirpath : Path = None):
"""
base class for MainApplication
QXMainApplication.inst - singleton instance
settings_dirpath(None) where the data will be saved
"""
super().__init__([])
if QXMainApplication.inst is not None:
raise Exception('Only one singleton QXMainApplication is allowed')
QXMainApplication.inst = self
self._settings_dirpath = settings_dirpath
if settings_dirpath is not None:
self._app_data_path = settings_dirpath / 'app.dat'
else:
self._app_data_path = None
self._hierarchy_name_count = {}
self._app_db = KeyValueDB(self._app_data_path)
if app_name is not None:
self.setApplicationName(app_name)
self.setStyle('Fusion')
text_color = QColor(200,200,200)
self.setStyleSheet(f"""
QRadioButton::disabled {{
color: gray;
}}
""")
pal = QPalette()
pal.setColor(QPalette.ColorRole.Window, QColor(56, 56, 56))
pal.setColor(QPalette.ColorRole.Base, QColor(25, 25, 25))
pal.setColor(QPalette.ColorRole.AlternateBase, QColor(56, 56, 56))
pal.setColor(QPalette.ColorRole.ToolTipBase, text_color )
pal.setColor(QPalette.ColorRole.ToolTipText, text_color )
pal.setColor(QPalette.ColorRole.Text, text_color )
pal.setColor(QPalette.ColorRole.Button, QColor(56, 56, 56))
pal.setColor(QPalette.ColorRole.ButtonText, Qt.GlobalColor.white)
pal.setColor(QPalette.ColorRole.PlaceholderText, Qt.GlobalColor.darkGray)
pal.setColor(QPalette.ColorGroup.Active, QPalette.ColorRole.ButtonText, text_color)
pal.setColor(QPalette.ColorGroup.Inactive, QPalette.ColorRole.ButtonText, text_color)
pal.setColor(QPalette.ColorGroup.Disabled, QPalette.ColorRole.ButtonText, Qt.GlobalColor.gray)
pal.setColor(QPalette.ColorRole.WindowText, text_color )
pal.setColor(QPalette.ColorGroup.Active, QPalette.ColorRole.WindowText, text_color)
pal.setColor(QPalette.ColorGroup.Inactive, QPalette.ColorRole.WindowText, text_color)
pal.setColor(QPalette.ColorGroup.Disabled, QPalette.ColorRole.WindowText, Qt.GlobalColor.gray)
pal.setColor(QPalette.ColorGroup.Disabled, QPalette.ColorRole.Text, Qt.GlobalColor.gray)
pal.setColor(QPalette.ColorRole.BrightText, Qt.GlobalColor.red)
pal.setColor(QPalette.ColorRole.Link, QColor(42, 130, 218))
pal.setColor(QPalette.ColorRole.Highlight, QColor(42, 130, 218))
pal.setColor(QPalette.ColorRole.HighlightedText, Qt.GlobalColor.black)
self.setPalette(pal)
self._reinitialize = False
self._timer = QXTimer(interval=10, timeout=self._on_10ms_timer, start=True)
def _on_10ms_timer(self):
self._app_db.process_messages()
if self._reinitialize:
self._reinitialize = False
self.on_reinitialize()
def register_QXWidget(self, widget) -> str:
"""
        registers QXWidget, checks validity, returns a unique name
"""
hierarchy = []
iter_widget = widget
while True:
hierarchy.insert(0, iter_widget.__class__.__name__)
iter_parent_widget = iter_widget.parentWidget()
if iter_parent_widget is None:
break
iter_widget = iter_parent_widget
if not isinstance(iter_widget, forward_declarations.QXWindow):
raise Exception('Top widget must be a class of QXWindow')
if len(hierarchy) == 1:
            # top level widgets (Windows) get no numeric suffix
return hierarchy[0]
else:
hierarchy_name = '.'.join(hierarchy)
num = self._hierarchy_name_count.get(hierarchy_name, -1)
num = self._hierarchy_name_count[hierarchy_name] = num + 1
return f'{hierarchy_name}:{num}'
def clear_app_data(self):
"""
clear app data and reinitialize()
"""
self._app_db.clear()
self.reinitialize()
def get_app_data(self, key, default_value=None):
"""
returns picklable data by picklable key stored in app db
returns default_value if no data
"""
return self._app_db.get_value(key, default_value=default_value)
def set_app_data(self, key, value):
"""
set picklable data by picklable key stored to app db
"""
self._app_db.set_value(key, value )
def run(self):
"""
run the app
"""
self.exec()
self._app_db.finish_pending_jobs()
def reinitialize(self):
"""
start reinitialization of app.
"""
self._reinitialize = True
def on_reinitialize(self):
raise NotImplementedError()
def get_language(self) -> str:
return self.get_app_data('__app_language', 'en-US')
def set_language(self, lang : str) -> str:
"""
lang xx-YY
example: en-US ru-RU
"""
return self.set_app_data('__app_language', lang)
```
#### File: qt/widgets/QXOpenGLWidget.py
```python
from PySide6.QtGui import *
from PySide6.QtOpenGL import *
from PySide6.QtOpenGLWidgets import *
from PySide6.QtWidgets import *
from ._part_QXWidget import _part_QXWidget
class QXOpenGLWidget(QOpenGLWidget, _part_QXWidget):
def __init__(self, **kwargs):
super().__init__()
_part_QXWidget.__init__(self, **kwargs)
self._default_pal = QPalette( self.palette() )
def focusInEvent(self, ev : QFocusEvent):
super().focusInEvent(ev)
_part_QXWidget.focusInEvent(self, ev)
def resizeEvent(self, ev : QResizeEvent):
super().resizeEvent(ev)
_part_QXWidget.resizeEvent(self, ev)
```
#### File: qt/widgets/QXWidgetVBox.py
```python
from PySide6.QtGui import *
from PySide6.QtWidgets import *
from .QXWidget import QXWidget
from .QXVBoxLayout import QXVBoxLayout
class QXWidgetVBox(QXWidget):
def __init__(self, widgets=None, contents_margins=0, spacing=0, **kwargs):
super().__init__(layout=QXVBoxLayout(widgets=widgets, contents_margins=contents_margins, spacing=spacing), **kwargs)
```
#### File: xlib/sjob/run_sequence.py
```python
import multiprocessing
from typing import Callable, List
from .. import console as lib_con
def _run_sequence(barrier, init_func, init_kwargs, final_func, process_func, pipe):
state = {}
if init_func is not None:
init_func(state, **init_kwargs)
barrier.wait()
while True:
if pipe.poll(0.05):
obj = pipe.recv()
cmd = obj['cmd']
if cmd == 'job':
result = process_func(state, obj['data'])
pipe.send({'cmd':'result', 'data': result})
elif cmd == 'finalize':
break
if final_func is not None:
final_func(state)
def run_sequence(data_list : List,
process_func : Callable,
init_func : Callable = None, init_kwargs : dict = None,
final_func : Callable = None,
mp_count : int = None, progress_bar_desc='Processing'):
"""
Simple Job to process list of picklable data.
init_func(state:dict, **init_kwargs)
process_func(state:dict, data) -> object
mp_count(None) number of subprocesses. Default - cores count.
"""
if mp_count is None:
mp_count = multiprocessing.cpu_count()
barrier = multiprocessing.Barrier(mp_count)
n_data_sent = [0]*mp_count
conn_list = [None]*mp_count
p_list = [None]*mp_count
for i in range(mp_count):
s_pipe, c_pipe = conn_list[i] = multiprocessing.Pipe()
p = p_list[i] = multiprocessing.Process(target=_run_sequence, args=(barrier, init_func, init_kwargs, final_func, process_func, c_pipe), daemon=True )
p.start()
data_list_len = len(data_list)
n_data_done = 0
i_data = 0
lib_con.progress_bar_print(0, data_list_len, desc=progress_bar_desc)
result = []
while n_data_done != data_list_len:
for n_conn, (s_pipe, _) in enumerate(conn_list):
if i_data < data_list_len:
if n_data_sent[n_conn] < 2:
n_data_sent[n_conn] += 1
data = data_list[i_data]
i_data += 1
s_pipe.send( {'cmd':'job', 'data':data} )
if s_pipe.poll(0):
obj = s_pipe.recv()
cmd = obj['cmd']
if cmd == 'result':
n_data_done += 1
lib_con.progress_bar_print(n_data_done, data_list_len, desc=progress_bar_desc)
n_data_sent[n_conn] -= 1
data = obj['data']
if data is not None:
result.append(data)
for n_conn, (s_pipe, _) in enumerate(conn_list):
s_pipe.send( {'cmd':'finalize'} )
return result
```
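A hedged usage sketch; the import path is assumed. `process_func` must be a module-level, picklable callable because it runs in subprocesses, and results are collected in completion order with `None` returns dropped.
```python
from xlib.sjob import run_sequence  # assumed import path

def square(state, x):
    return x * x

if __name__ == '__main__':
    results = run_sequence(list(range(100)), square,
                           mp_count=4, progress_bar_desc='Squaring')
```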
#### File: xlib/text/ascii_table.py
```python
import re
from typing import Union, List
_opts_halign = {'l':0,'c':1,'r':2}
_opts_valign = {'t':0,'m':1,'b':2}
"""
test = [
'|c99 TABLE NAME',
'|3 3-span left align\n multiline row |rb2 2-span right bottom align',
'|c WWWWWWWWWW |c WWWWWWWWWW |c WWWWWWWWWW |c WWWWWWWWWW |c WWWWWWWWWW',
'|c3 center aligned 3-span |r2 2-span right align',
'|r 0 |c3 Center align\nmulti\nline\nrow |l 1.00',
'|r 1 |r3 Right align\nmulti\nline\nrow |l 1.00',
'| ? | s',
'| ? | Three |c Two | asdasd | asdasd',
'| ? |3 asdasdasdasdasdasdasdasdasdasdasda |3 asdasd',
]
"""
class Column:
__slots__ = ['halign', 'valign', 'span', 'content']
def __init__(self, halign : int = 0, valign : int = 0, span : int = 1, content : str = None):
self.halign, self.valign, self.span, self.content = halign, valign, span, content
def __str__(self): return f'{self.content} s:{self.span}'
def __repr__(self): return self.__str__()
def split(self, sep : Union[str,int], maxsplit=-1) -> List['Column']:
result = []
if isinstance(sep, int):
c_split = [ self.content[:sep], self.content[sep:] ]
else:
c_split = self.content.split(sep, maxsplit=maxsplit)
if len(c_split) == 1:
return [self]
for c in c_split:
col = Column()
col.halign = self.halign
col.valign = self.valign
col.span = self.span
col.content = c
result.append(col)
return result
def copy(self, content=...):
if content is Ellipsis:
content=self.content
column = Column()
column.halign = self.halign
column.valign = self.valign
column.span = self.span
column.content = content
return column
def ascii_table(table_def : List[str],
min_table_width : int = None,
max_table_width : int = None,
fixed_table_width : int = None,
style_borderless = False,
left_border : str= '|',
right_border : str = '|',
border : str= '|',
row_symbol : str = '-',
col_def_delim = '|',
) -> str:
"""
arguments
table_def list of str
|[options] data - defines new column
options:
halign: l - left (default), c - center, r - right
valign: t - top (default), m - center, b - bottom
1..N - col span
example: ['|c99 TABLE NAME',
'|l first col |r second col']
"""
if style_borderless:
left_border, right_border, border, row_symbol = None, None, ' | ', None
if fixed_table_width is not None:
min_table_width = fixed_table_width
max_table_width = fixed_table_width
if min_table_width is not None and max_table_width is not None:
if min_table_width > max_table_width:
raise ValueError('min_table_width > max_table_width')
col_spacing = len(border) if border is not None else 0
cols_count = 0
# Parse columns in table_def
rows : List[List[Column]] = []
for raw_line in table_def:
# Line must starts with column definition
if len(raw_line) == 0 or raw_line[0] != col_def_delim:
raise ValueError(f'Line does not start with | symbol, content: "{raw_line}"')
# Parsing raw columns
row : List[Column] = []
i_raw_col = 0
raw_line_split = raw_line.split(col_def_delim)[1:]
raw_line_split_len = len(raw_line_split)
for n_raw_col, raw_col in enumerate(raw_line_split):
# split column options and content
col_opts, col_content = ( raw_col.split(' ', maxsplit=1) + [''] )[:2]
# Parse column options
col = Column(content=col_content)
for col_opt in re.findall('[lcr]|[tmb]|[0-9]+', col_opts.lower()):
h = _opts_halign.get(col_opt, None)
if h is not None:
col.halign = h
continue
v = _opts_valign.get(col_opt, None)
if v is not None:
col.valign = v
continue
col.span = max(1, int(col_opt))
row.append(col)
if n_raw_col != raw_line_split_len-1:
i_raw_col += col.span
else:
# total max columns, by last column without span
cols_count = max(cols_count, i_raw_col+1)
rows.append(row)
# Cut span of last cols to fit cols_count
for row in rows:
row[-1].span = cols_count - (sum(col.span for col in row) - row[-1].span)
# Compute cols border indexes
cols_border = [0]*cols_count
for i_col_max in range(cols_count+1):
for row in rows:
i_col = 0
col_border = 0
for col in row:
i_col += col.span
col_max_len = max([ len(x.strip()) for x in col.content.split('\n')])
col_border = cols_border[i_col-1] = max(cols_border[i_col-1], col_border + col_max_len)
if i_col >= i_col_max:
break
col_border += col_spacing
# fix zero cols border
for i_col, col_border in enumerate(cols_border):
if i_col != 0 and col_border == 0:
cols_border[i_col] = cols_border[i_col-1]
table_width = cols_border[-1] + (len(left_border) if left_border is not None else 0) + \
(len(right_border) if right_border is not None else 0)
# Determine size of table width
table_width_diff = 0
if max_table_width is not None:
table_width_diff = max(table_width_diff, table_width - max_table_width)
if min_table_width is not None:
table_width_diff = min(table_width_diff, table_width - min_table_width)
if table_width_diff != 0:
# >0 :shrink, <0 :expand table
diffs = [ x-y for x,y in zip(cols_border, [0]+cols_border[:-1] ) ]
while table_width_diff != 0:
if table_width_diff > 0:
max_diff = max(diffs)
if max_diff <= col_spacing:
raise Exception('Unable to shrink the table to fit max_table_width.')
diffs[ diffs.index(max_diff) ] -= 1
else:
diffs[ diffs.index(min(diffs)) ] += 1
table_width_diff += 1 if table_width_diff < 0 else -1
for i in range(len(cols_border)):
cols_border[i] = diffs[i] if i == 0 else cols_border[i-1] + diffs[i]
# recompute new table_width
table_width = cols_border[-1] + (len(left_border) if left_border is not None else 0) + \
(len(right_border) if right_border is not None else 0)
# Process columns for \n and col width
new_rows : List[List[List[Column]]] = []
for row in rows:
row_len = len(row)
# Gather multi rows for every col
cols_sub_rows = []
i_col = 0
col_border = 0
for col in row:
i_col += col.span
col_border_next = cols_border[i_col-1]
col_width = col_border_next-col_border
# slice col to sub rows by \n separator and col_width
col_content_split = [ x.strip() for x in col.content.split('\n') ]
cols_sub_rows.append([ x[i:i+col_width].strip() for x in col_content_split
for i in range(0, len(x), col_width) ])
col_border = col_border_next + col_spacing
cols_sub_rows_max = max([len(x) for x in cols_sub_rows])
for n, (col, col_sub_rows) in enumerate(zip(row, cols_sub_rows)):
valign = col.valign
unfilled_rows = cols_sub_rows_max-len(col_sub_rows)
if valign == 0: # top
col_sub_rows = col_sub_rows + ['']*unfilled_rows
elif valign == 1: # center
top_pad = unfilled_rows // 2
bottom_pad = unfilled_rows - top_pad
col_sub_rows = ['']*top_pad + col_sub_rows + ['']*bottom_pad
elif valign == 2: # bottom
col_sub_rows = ['']*unfilled_rows + col_sub_rows
cols_sub_rows[n] = col_sub_rows
sub_rows = [ [None]*row_len for _ in range(cols_sub_rows_max) ]
for n_col, col in enumerate(row):
for i in range(cols_sub_rows_max):
sub_rows[i][n_col] = col.copy(content=cols_sub_rows[n_col][i])
new_rows.append(sub_rows)
rows = new_rows
# Composing final lines
lines = []
row_line = row_symbol[0]*table_width if row_symbol is not None else None
if row_line is not None:
lines.append(row_line)
for sub_rows in rows:
for row in sub_rows:
line = ''
if left_border is not None:
line += left_border
i_col = 0
for col in row:
col_content = col.content
if i_col == 0:
col_border0 = 0
else:
if border is not None:
line += border
col_border0 = cols_border[i_col-1] + col_spacing
i_col += col.span
col_border1 = cols_border[i_col-1]
col_space = col_border1 - col_border0
col_remain_space = col_space-len(col_content)
halign = col.halign
if halign == 0: # left
col_content = col_content + ' '*col_remain_space
elif halign == 1: # center
col_left_pad = col_remain_space // 2
col_right_pad = col_remain_space - col_left_pad
col_content = ' '*col_left_pad + col_content + ' '*col_right_pad
elif halign == 2: # right
col_content = ' '*col_remain_space + col_content
line += col_content
if right_border is not None:
line += right_border
lines.append(line)
if len(sub_rows) != 0 and row_line is not None:
lines.append(row_line)
return '\n'.join(lines)
``` |
{
"source": "jkenney9a/Bioinformatics",
"score": 3
} |
#### File: Bioinformatics/Codon_analysis/Codon_analysis.py
```python
import sys, os, glob
from Bio import Entrez
from Bio import SeqIO
import csv
import string
import numpy as np
def get_gene_list(filename):
"""
Input: File with list of genes or list of gene output names from proteomics
Output: List of gene names in file
"""
gene_list = []
if filename.split('.')[-1] == 'csv':
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
                if len(row) > 0:
                    if "GN=" in row[0]:
                        gene_list.append(get_gene_name(row[0]))
                    elif ';' in row[0]:
                        for gene in row[0].split(';'):
                            gene_list.append(gene.strip())
                    else:
                        gene_list.append(row[0])
elif filename.split('.')[-1] == 'txt':
f = open(filename)
for line in f:
if "GN=" in line:
gene = get_gene_name(line)
gene_list.append(gene)
else:
gene_list.append(line.rstrip("\n"))
f.close()
return gene_list
def get_gene_name(line):
"""
Input: A line read in from a txt or csv file from some proteomic data
that contains a 'GN=' part before the gene name
Output: The gene name pulled out of the line
"""
gene = ""
start = line.find("GN=")
while line[start+3] != " ":
gene += line[start+3]
start += 1
return gene
def get_CDS_fasta_files(gene, organism):
"""
Input: gene name and organism
Output: fasta file with CDS sequence from NCBI in fasta directory
If the file already exists does not download from NCBI
"""
if not os.path.exists("fasta"):
os.mkdir("fasta")
#Checks to see if the file has already been downloaded previously
#If it has not, then it will be downloaded, otherwise it will use the
#previously downloaded file. This speeds up processing time and minimizes
#the strain on NCBI resources
filename = gene + "_" + organism
if len(glob.glob("fasta\\" + filename + "*.fasta")) == 0:
#Get gene id from Gene database; exclude predicted sequences
search_term = gene + "[Gene Name] AND " + organism + \
"[Organism] AND mRNA[Filter] AND RefSeq[Filter] NOT PREDICTED[Title]"
search_handle = Entrez.esearch(db="nucleotide", term = search_term)
#Parse the resulting xml file into a dictionary and get gene ID numbers
#associated with specific gene records
search_record = Entrez.read(search_handle)
gene_ids = search_record["IdList"]
search_handle.close()
count = 1
#Gets the CDS file for each gene from Entrez and creates a fasta file
#for each gene entry
for g in gene_ids:
handle = Entrez.efetch(db="nucleotide", id=g, rettype="fasta_cds_na",\
retmode = "text")
record = SeqIO.read(handle, format="fasta")
SeqIO.write(record, "fasta\\" + filename + "_" + str(count)\
+ ".fasta", "fasta")
count += 1
def genefile_to_seq(gene, organism):
"""
Input: Gene and organism name for which a file exists
Output: List of sequence objects associated with that gene
"""
filename = gene + "_" + organism
filenames = glob.glob("fasta\\" + filename + "*.fasta")
sequences = []
for f in filenames:
sequences.append(SeqIO.read(f, "fasta"))
return sequences
def normalized_codon_mapping(codon_mapping):
"""
Input: Codon frequency mapping dictionary in form: {'codon':['AA',Freq])
Output: Normalized codon frequency map relative to max frequency for each
codon {'codon':'rel freq'}
Note: AA's in codon map must be encoded as single letters
"""
AA_List = ['A','R','N','D','C','E','Q','G','H','I','L','K','M','F','P',\
'S','T','W','Y','V','*']
#Creates a dictionary mapping AA letter to frequencies associated with AA
AA_Freq_Dict = {x:[codon_mapping[y][1]
for y in codon_mapping if codon_mapping[y][0] == x] for x in AA_List}
#Dictionary mapping amino acid to maximum frequency value in codon_mapping
AA_Max_Freq = {x:max(AA_Freq_Dict[x]) for x in AA_Freq_Dict}
#Deal with zeros at stop codons:
for x in AA_Max_Freq:
if AA_Max_Freq[x] == 0:
AA_Max_Freq[x] = 1
#Dictionary mapping codon to relative codon frequency
return {x:[codon_mapping[x][0],(codon_mapping[x][1] / AA_Max_Freq[codon_mapping[x][0]])]
for x in codon_mapping}
def sequence_to_codons(sequence):
"""
Input: Biopython sequence object
Output: list containing codons in order
"""
codons = []
if len(sequence.seq) % 3 == 0:
        for x in range(0, len(sequence.seq), 3):
            codons.append(str(sequence.seq[x:x+3]))
else:
return "Error, not a coding sequence"
return codons
def codon_map_gene(gene, organism, codon_map):
"""
Input: Gene name, organism and codon map
Output: A list of codon mappings for given gene
"""
sequences = genefile_to_seq(gene, organism)
gene_codons = []
for g in sequences:
gene_codons.append(sequence_to_codons(g))
gene_codon_maps = []
for x in gene_codons:
gene_codon_maps.append([codon_map[y][1] for y in x])
return gene_codon_maps
def import_codon_map(filename):
"""
Input: filename (csv w/ comma delimiter) containing a codon map where
the first column is codon, second column is amino acid abbreviation (one
letter) and third column is value (e.g, time to decode or relative
abundance etc.)
Output: A codon mapping dictionary with the format of
{codon: [AA, value]}
"""
codon_map = {}
with open(filename, "r") as csvfile:
reader = csv.reader(csvfile)
for row in reader:
row[0] = string.replace(row[0], "U", "T")
codon_map[row[0]] = [row[1], float(row[2])]
return codon_map
def gene_codon_analysis(gene, organism, codon_map):
"""
Input: gene name, organism, and codon mapping
This function assumes the mRNA fasta files have already been downloaded
Output: A dictionary of the format: {gene_name: name, avg: mappings divided by length
of transcript, total: sum of mappings, normalized: normalized mappings relative
to maximum value for each AA, range_ratio: a ratio of the range of differences
for protein lengths relative to maximum protein length, length_range:range of
protein lengths from downloaded transcripts}
"""
gene_codon_maps = codon_map_gene(gene, organism, codon_map)
codon_map_norm = normalized_codon_mapping(codon_map)
gene_codon_maps_norm = codon_map_gene(gene, organism,
codon_map_norm)
if len(gene_codon_maps) == 0:
return {"gene_name": gene}
#Calculate sums, lengths and averages for given codon mapping
codon_sums = []
protein_lengths = []
codon_first_25 = []
codon_first_third = []
for mapping in gene_codon_maps:
codon_sums.append(sum(mapping))
protein_lengths.append(len(mapping))
        codon_first_25.append(sum(mapping[0:25]))
codon_first_third.append(sum(mapping[0:int(len(mapping)/3)]))
codon_averages = [float(sums)/lengths
for sums,lengths in zip(codon_sums, protein_lengths)]
codon_first_25_avgs = [float(sums)/25 for sums in codon_first_25]
codon_first_third_avgs = [float(sums)/int(lengths / 3) for sums, lengths in
zip(codon_first_third, protein_lengths)]
#Calculate normalized codon usage (i.e, relative to theoretical maximum)
codon_sums_norm = []
for mapping in gene_codon_maps_norm:
codon_sums_norm.append(sum(mapping) - 1) #-1 to deal with stop codon
codon_averages_norm = [float(sums)/lengths
for sums, lengths in zip(codon_sums_norm, protein_lengths)]
protein_length_min = min(protein_lengths)
protein_length_max = max(protein_lengths)
protein_length_median = np.median(protein_lengths)
output_avg = sum(codon_averages)/len(codon_averages)
output_total = sum(codon_sums)/len(codon_sums)
output_norm = sum(codon_averages_norm)/len(codon_averages_norm)
output_range_ratio = (protein_length_max - protein_length_min) / float(protein_length_max)
output_protein_size_range = str(protein_length_min) + " - " + str(protein_length_max)
output_first_25 = sum(codon_first_25_avgs)/len(codon_first_25_avgs)
output_first_third = sum(codon_first_third_avgs)/(len(codon_first_third_avgs))
output_dict = {"gene_name":gene, "avg":output_avg, "total":output_total,
"normalized":output_norm, "range_ratio":output_range_ratio,
"length_range":output_protein_size_range,
"median_protein_length":protein_length_median, "first_25_avg":
output_first_25, "first_third_avg": output_first_third}
return output_dict
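# Example invocation (file and organism names below are placeholders):
#   python Codon_analysis.py --gene_list=genes.txt --organism="Mus musculus" \
#       --codon_map=codon_map.csv --output=results.csv --download=T --email=you@example.org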
if __name__ == "__main__":
import sys
download = False
Entrez.email = None
for arg in sys.argv[1:]:
try:
name, value = arg.split('=', 1)
        except ValueError:
            print "Error parsing command line argument. No '=' found"
            continue
if name.lower() == "--gene_list" or name.lower() == "--list":
gene_list_filename = value
elif name.lower() == "--email":
Entrez.email = value
elif name.lower() == "--output":
output_filename = value
elif name.lower() == "--codon_map" or name.lower() == "--map":
codon_map_filename = value
elif name.lower() == "--download":
download = value
elif name.lower() == "--organism":
organism = value
gene_list = get_gene_list(gene_list_filename)
codon_map = import_codon_map(codon_map_filename)
if download == True or download == "T":
if Entrez.email == None:
print "Error. If downloading from NCBI need to provide \
email address using --email=<your email address here>"
sys.exit()
for gene in gene_list:
get_CDS_fasta_files(gene, organism)
with open(output_filename, 'wb') as csvfile:
fieldnames = ['gene_name', 'avg', 'total', 'normalized', 'range_ratio',
'length_range', 'median_protein_length', 'first_25_avg',
'first_third_avg']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames, restval="ERROR")
writer.writeheader()
counter=0
for gene in gene_list:
writer.writerow(gene_codon_analysis(gene, organism, codon_map))
counter += 1
print counter, "of", len(gene_list),"genes analyzed! \r",
``` |
{
"source": "jkent/jkent.net",
"score": 2
} |
#### File: jkent.net/jkent_net/__init__.py
```python
from .repository import Repository
from .ext.security import ExtendedRegisterForm
from flask import Flask, g, render_template
from flask_mail import Mail
from flask_security import Security, SQLAlchemyUserDatastore
import os
mail = Mail()
def create_app():
global security, user_datastore
# create and configure the app
app = Flask(__name__, instance_relative_config=True)
if os.environ.get('FLASK_ENV') == 'development':
app.config['SECRET_KEY'] = 'development'
app.config.from_mapping(
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(app.instance_path, 'jkent_net.db'),
SQLALCHEMY_TRACK_MODIFICATIONS = False,
)
app.config.from_pyfile('config.py', silent=True)
app.repository_path = os.path.join(app.instance_path, 'repository')
app.repository = Repository(app.repository_path)
app.cache_root = os.path.join(app.instance_path, 'cache')
from . import models
models.init_app(app)
mail.init_app(app)
user_datastore = SQLAlchemyUserDatastore(models.db, models.User, models.Role)
security = Security(app, user_datastore, register_form=ExtendedRegisterForm)
from . import views
views.init_app(app)
from . import cli
cli.init_app(app)
return app
```
#### File: jkent.net/jkent_net/repository.py
```python
from io import BytesIO
import os
import subprocess
class Repository:
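    """Thin wrapper around the git command line: every operation shells out to
    /usr/bin/git inside self._path. Most read methods take a version (commit-ish);
    passing a falsy version makes them fall back to the working tree."""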
def __init__(self, path):
self._path = path
if not os.path.exists(os.path.join(self._path, '.git')):
os.makedirs(self._path, exist_ok=True)
subprocess.check_call(['/usr/bin/git', '-C', self._path, 'init'],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
@property
def path(self):
return self._path
def exists(self, path, version='HEAD'):
try:
subprocess.check_call(['/usr/bin/git', '-C', self._path, 'cat-file',
'-e', '{}:{}'.format(version, path)], stderr=subprocess.DEVNULL)
except subprocess.CalledProcessError:
return False
return True
def read(self, path, version='HEAD'):
if not version:
if not os.path.isfile(os.path.join(self._path, path)):
return None
try:
file = open(os.path.join(self._path, path), 'rb')
except FileNotFoundError:
return None
return file
else:
try:
data = subprocess.check_output(['/usr/bin/git', '-C',
self._path, 'cat-file', '-p',
'{}:{}'.format(version, path)], stderr=subprocess.DEVNULL)
except subprocess.CalledProcessError:
return None
return BytesIO(data)
def write(self, path, data):
with open(os.path.join(self._path, path), 'wb') as f:
f.write(data)
def checkout(self, path, version='HEAD'):
subprocess.check_call(['/usr/bin/git', '-C', self._path, 'checkout',
version, '--', path])
subprocess.check_call(['/usr/bin/git', '-C', self._path, 'clean', '-fd',
'--', path])
def add(self, path='.'):
subprocess.check_call(['/usr/bin/git', '-C', self._path, 'add', path])
def commit(self, message=''):
command = ['/usr/bin/git', '-C', self._path, 'commit', '-q',
'--allow-empty-message', '-m', message]
subprocess.check_call(command)
def history(self, path=None, version='HEAD', num=None):
if not version:
return []
try:
result = subprocess.check_output(['/usr/bin/git', '-C', self._path,
'log'] + (['-n{}'.format(num)] if num else []) +
['--pretty=format:%H %at %ae %s', version, '--', path],
stderr=subprocess.DEVNULL)
except subprocess.CalledProcessError:
return ''
results = []
for line in result.rstrip().decode('utf8').split('\n'):
if line:
line = tuple(line.split(' ', 3))
if self.exists(path, line[0]):
results.append(line)
return results
def list(self, path, version='HEAD', recursive=False):
if version:
try:
prefix = os.path.dirname(path)
result = subprocess.check_output(['/usr/bin/git', '-C',
self._path, 'ls-tree'] + (['-r'] if recursive else []) +
['--name-only', version, '--', path],
stderr=subprocess.DEVNULL)
paths = result.rstrip().split(b'\n')
except subprocess.CalledProcessError:
paths = []
else:
prefix = os.path.dirname(os.path.join(self._path, path))
paths = []
for root, dirs, files in os.walk(os.path.join(self._path, path)):
for dir in dirs:
dirname = os.path.normpath(os.path.join(root, dir)) + os.sep
paths.append(dirname.encode('utf8'))
if recursive:
dirs[:] = [d for d in dirs if not d.startswith('.')]
else:
dirs[:] = []
for file in files:
filename = os.path.normpath(os.path.join(root, file))
paths.append(filename.encode('utf8'))
paths = list(map(lambda x: x[len(prefix) + 1:], paths))
paths.sort()
return paths
def isdir(self, path, version='HEAD'):
if version:
try:
result = subprocess.check_output(['/usr/bin/git', '-C',
self._path, 'ls-tree', '-r', '--name-only', version, '--',
path], stderr=subprocess.DEVNULL)
return result.rstrip().split(b'\n')[0].decode('utf8') != path
except subprocess.CalledProcessError:
return False
except:
return False
else:
return os.path.isdir(os.path.join(self._path, path))
def diff(self, path, version1=None, version2=None):
if version1 == version2:
return False
try:
if version1 == None:
subprocess.check_call(['/usr/bin/git', '-C', self._path, 'diff',
'--exit-code', '--quiet', version2, '--', path])
elif version2 == None:
subprocess.check_call(['/usr/bin/git', '-C', self._path, 'diff',
'--exit-code', '--quiet', '-R', version1, '--', path])
else:
subprocess.check_call(['/usr/bin/git', '-C', self._path, 'diff',
'--exit-code', '--quiet', version1, version2, '--', path])
except subprocess.CalledProcessError:
return True
return False
```
#### File: jkent_net/views/login_github.py
```python
from .. import user_datastore
from ..models import db, OAuth, User
from flask import flash
from flask_dance.consumer import oauth_authorized
from flask_dance.consumer.storage.sqla import SQLAlchemyStorage
from flask_dance.contrib.github import make_github_blueprint
from flask_security import current_user, login_user
__all__ = ['bp']
bp = make_github_blueprint(scope='user:email')
bp.storage = SQLAlchemyStorage(OAuth, db.session, user=current_user)
@oauth_authorized.connect_via(bp)
def github_logged_in(blueprint, token):
if not token:
flash('Failed to login with GitHub', category='error')
return False
resp = blueprint.session.get('/user')
    if not resp.ok:
        flash('Failed to fetch user info from GitHub', category='error')
        return False
user_info = resp.json()
resp = blueprint.session.get('/user/emails')
if not resp.ok:
flash('Failed to fetch email info from GitHub', category='error')
return False
entries = resp.json()
entries = sorted(entries, key=lambda e: not e['primary'])
entries = list(filter(lambda e: e['verified'], entries))
if not entries:
flash('No email addresses have been verified by GitHub', category='error')
return False
user = None
for entry in entries:
user = user_datastore.find_user(email=entry['email'])
if user:
break
if not user:
user = user_datastore.create_user(
email = entries[0]['email'],
name = user_info['name']
)
oauth = OAuth.query.filter_by(
provider = blueprint.name,
user = user,
).first()
if not oauth:
oauth = OAuth(
provider = blueprint.name,
user = user,
token = token,
)
db.session.add_all([user, oauth])
db.session.commit()
user.init_avatar(user, user_info['avatar_url'] + '&s=64')
login_user(user)
return False
``` |
{
"source": "jkent/pybot",
"score": 2
} |
#### File: pybot/pybot/bot.py
```python
from textwrap import wrap
from time import time
from . import config
from .client import Client
from .decorators import hook, priority
from .hook import HookManager, TimestampHook
from .plugin import PluginManager
class Bot(Client):
def __init__(self, core, network):
self.core = core
self.network = network
host = config.config[self.network].get('host')
port = config.config[self.network].get('port')
        ssl = config.config[self.network].get('ssl', False)
        if port is None:
            port = 6697 if ssl else 6667
Client.__init__(self, (host, port), ssl)
self.hooks = HookManager(self)
self.plugins = PluginManager(self)
self.hooks.install_owner(self)
self.nick = None
self.channels = {}
self.allow_rules = {'*': {'ANY': 1}}
self.deny_rules = {}
for plugin in config.autoload_list(self):
self.plugins.load(plugin)
self.connect()
def set_timer(self, fn, timestamp, owner=None):
hook = TimestampHook(timestamp)
hook.bind(fn, owner)
self.hooks.install(hook)
return hook
def set_interval(self, fn, seconds, owner=None):
hook = TimestampHook(time() + seconds, {'repeat': seconds})
hook.bind(fn, owner)
self.hooks.install(hook)
return hook
def set_timeout(self, fn, seconds, owner=None):
hook = TimestampHook(time() + seconds)
hook.bind(fn, owner)
self.hooks.install(hook)
return hook
def do_tick(self, timestamp):
self.hooks.call_timestamp(timestamp)
def privmsg(self, target, text):
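        # Wrap outgoing text so each message stays within the 512-byte IRC line
        # limit (510 characters plus CRLF), minus the prefix the server prepends.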
wraplen = 510
wraplen -= 1 + len(self.nick) # ":<nick>"
wraplen -= 1 + 10 # "!<user>"
wraplen -= 1 + 63 # "@<host>"
wraplen -= 9 # " PRIVMSG "
wraplen -= len(target) # "<target>"
wraplen -= 2 # " :"
for line in wrap(text, wraplen):
self.send('PRIVMSG %s :%s' % (target, line))
def notice(self, target, text):
wraplen = 510
wraplen -= 1 + len(self.nick) # ":<nick>"
wraplen -= 1 + 10 # "!<user>"
wraplen -= 1 + 63 # "@<host>"
wraplen -= 8 # " NOTICE "
wraplen -= len(target) # "<target>"
wraplen -= 2 # " :"
for line in wrap(text, wraplen):
self.send('NOTICE %s :%s' % (target, line))
def join(self, channels, keys=None):
if isinstance(channels, str):
channels = (channels,)
channels = list(map(str.lower, channels))
if channels:
channel_s = ','.join(channels)
if keys:
if isinstance(keys, str):
keys = (keys,)
key_s = ','.join(keys)
self.send('JOIN %s %s' % (channel_s, key_s))
pairs = list(zip(channels, keys))
for item in pairs:
self.channels[item[0]] = {'key': item[1], 'joined': False,
'nicks': set()}
else:
self.send('JOIN %s' % channel_s)
for channel in channels:
self.channels[channel] = {'joined': False, 'nicks': set()}
def part(self, channels, message=None):
if type(channels) == str:
channels = (channels,)
if channels:
channels = ','.join(channels)
if message:
self.send('PART %s :%s' % (channels, message))
else:
self.send('PART %s' % channels)
@hook
@priority(0)
def disconnect_event(self):
for _, props in list(self.channels.items()):
props['joined'] = False
props['nicks'].clear()
@hook
@priority(0)
def shutdown_event(self, reason):
self.send('QUIT :%s' % reason)
for name in self.plugins.list():
self.plugins.unload(name, True)
@hook
def _001_command(self, msg):
self.server = msg.source
self.nick = msg.param[0]
@hook
def _353_command(self, msg):
channel = msg.param[2].lower()
if channel in self.channels and self.channels[channel]['joined']:
#if 'nicks' not in self.channels[channel]:
# self.channels[channel]['nicks'] = set()
nicks = self.channels[channel]['nicks']
for nick in msg.param[-1].split():
if nick.startswith(('~', '&', '@', '%', '+')):
nicks.add(nick[1:])
else:
nicks.add(nick)
@hook
def join_command(self, msg):
channel = msg.param[0].lower()
if msg.source == self.nick:
if channel not in self.channels:
self.channels[channel] = {}
self.channels[channel]['name'] = msg.param[0]
self.channels[channel]['joined'] = True
elif channel in self.channels:
self.channels[channel]['nicks'].add(msg.source)
@hook
def kick_command(self, msg):
channel = msg.param[0].lower()
if msg.param[1] == self.nick:
if channel in self.channels:
self.channels[channel]['joined'] = False
if 'nicks' in self.channels[channel]:
self.channels[channel]['nicks'].clear()
elif channel in self.channels:
self.channels[channel]['nicks'].remove(msg.source)
@hook
def nick_command(self, msg):
new_nick = msg.param[0].lower()
if msg.source == self.nick:
self.nick = new_nick
for _, props in list(self.channels.items()):
if 'nicks' in props and msg.source in props['nicks']:
props['nicks'].remove(msg.source)
props['nicks'].add(new_nick)
@hook
@priority(0)
def part_command(self, msg):
channel = msg.param[0].lower()
if msg.source == self.nick:
if channel in self.channels:
self.channels[channel]['joined'] = False
if 'nicks' in self.channels[channel]:
self.channels[channel]['nicks'].clear()
elif channel in self.channels:
self.channels[channel]['nicks'].remove(msg.source)
@hook
def ping_command(self, msg):
self.send('PONG :%s' % msg.param[-1])
@hook
@priority(0)
def quit_command(self, msg):
for _, props in list(self.channels.items()):
if 'nicks' in props and msg.source in props['nicks']:
props['nicks'].remove(msg.source)
```
#### File: pybot/pybot/config.py
```python
from collections import OrderedDict
from .yaml import yaml
config = OrderedDict()
def load(core):
global config
with open(core.config_path) as f:
config = yaml.load(f)
def autoload_list(bot):
global config
plugins = ['base']
for name, options in config[bot.network].get('plugins',
OrderedDict()).items():
if name not in plugins and \
(not options or options.get('autoload', True)):
plugins.append(name)
return plugins
def plugin_options(bot, plugin):
global config
plugin = config[bot.network].get('plugins', OrderedDict()).get(plugin)
if plugin:
return plugin
return OrderedDict()
```
#### File: pybot/pybot/hook.py
```python
import bisect
import inspect
import re
import traceback
from .message import Message
url_re = re.compile(
'(!)?https?://[^ /]+\.[^ /]+(?:/[^ ]*)?'
)
domain_re = re.compile('https?://(?:www\.)?([^ /]+\.[^ /]+)')
class Hook(object):
def __init__(self, sort, extra={}):
self.sort = sort
self.extra = extra
def __call__(self, *args):
if not hasattr(self, 'fn'):
            raise Exception('attempt to call an unbound hook')
try:
return self.fn(*args[:self.nargs])
except:
print('%s hook error:' % type(self))
traceback.print_exc()
def __lt__(self, other):
return self.sort < other.sort
def bind(self, fn, owner=None):
if owner:
self.owner = owner
elif hasattr(fn, '__self__'):
self.owner = fn.__self__
else:
raise Exception('unable to bind hook, no owner!')
self.fn = fn
if inspect.ismethod(fn):
self.nargs = self.fn.__func__.__code__.co_argcount - 1
self.__func__ = self.fn.__func__
else:
self.nargs = self.fn.__code__.co_argcount
self.__func__ = self.fn
if not hasattr(self.__func__, '_priority'):
self.__func__._priority = getattr(self.owner, 'default_priority', 500)
if not hasattr(self.__func__, '_level'):
self.__func__._level = getattr(self.owner, 'default_level', 1)
class EventHook(Hook):
def __init__(self, event):
Hook.__init__(self, event)
class CommandHook(Hook):
def __init__(self, command):
Hook.__init__(self, command.upper())
class TriggerHook(Hook):
def __init__(self, trigger):
if type(trigger) == str:
l = trigger.split()
else:
l = trigger
Hook.__init__(self, (len(l),) + tuple(l))
class TimestampHook(Hook):
def __init__(self, timestamp, extra={}):
Hook.__init__(self, timestamp, extra)
class UrlHook(Hook):
def __init__(self, domain):
Hook.__init__(self, domain)
class HookManager:
def __init__(self, bot):
self.bot = bot
self.event_hooks = []
self.command_hooks = []
self.trigger_hooks = []
self.timestamp_hooks = []
self.url_hooks = []
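        # Each list is kept ordered by Hook.sort via bisect.insort_right in
        # install(), so find() can binary-search for the matching slice.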
def install(self, hook):
if not isinstance(hook, Hook):
raise Exception('hook is not a Hook instance')
if not hasattr(hook, 'fn') or not hasattr(hook, 'owner'):
raise Exception('hook not bound')
default_priority = getattr(hook.owner, 'default_priority', 100)
default_level = getattr(hook.owner, 'default_level', 1)
hook.priority = getattr(hook.fn, '_priority', default_priority)
hook.level = getattr(hook.fn, '_level', default_level)
d = {EventHook: self.event_hooks,
CommandHook: self.command_hooks,
TriggerHook: self.trigger_hooks,
TimestampHook: self.timestamp_hooks,
UrlHook: self.url_hooks}
l = d.get(type(hook), None)
if l == None:
raise Exception('unsupported hook class: %s' % type(hook))
bisect.insort_right(l, hook)
def install_owner(self, owner):
for _, method in inspect.getmembers(owner, inspect.ismethod):
hooks = getattr(method.__func__, '_hooks', [])
for hook in hooks:
hook.bind(method, owner)
self.install(hook)
def uninstall(self, hook):
d = {EventHook: self.event_hooks,
CommandHook: self.command_hooks,
TriggerHook: self.trigger_hooks,
TimestampHook: self.timestamp_hooks,
UrlHook: self.url_hooks}
l = d.get(type(hook), [])
l.remove(hook)
def uninstall_owner(self, owner):
for l in [self.event_hooks,
self.command_hooks,
self.trigger_hooks,
self.timestamp_hooks,
self.url_hooks]:
l[:] = (h for h in l if h.owner != owner)
def find(self, model):
d = {EventHook: self.event_hooks,
CommandHook: self.command_hooks,
TriggerHook: self.trigger_hooks,
TimestampHook: self.timestamp_hooks,
UrlHook: self.url_hooks}
l = d.get(type(model), [])
if isinstance(model, TimestampHook):
left = 0
else:
left = bisect.bisect_left(l, model)
right = bisect.bisect_right(l, model)
hook_seq = l[left:right]
hook_seq.sort(key=lambda h: -h.fn._priority)
return hook_seq
def call(self, hook_seq, *args):
for hook in hook_seq:
if isinstance(hook, TimestampHook):
self.uninstall(hook)
repeat = hook.extra.get('repeat', None)
if repeat:
hook.sort += repeat
self.install(hook)
if hook(*args):
return True
def call_event(self, event, *args):
hooks = self.find(EventHook(event))
self.call(hooks, *args)
if event == 'recv':
msg = Message(args[0], self.bot)
self.call_command(msg)
def call_command(self, msg):
if msg.cmd in ('NOTICE', 'PRIVMSG'):
self.apply_permissions(msg)
if msg.cmd == 'PRIVMSG':
self.process_privmsg(msg)
hooks = self.find(CommandHook(msg.cmd))
self.call(hooks, msg)
def apply_permissions(self, msg):
msg.permissions = {}
for pattern, rules in list(self.bot.allow_rules.items()):
regex = '^' + re.escape(pattern).replace('\\*', '.*') + '$'
if not re.match(regex, msg.prefix):
continue
for plugin, level in list(rules.items()):
current_level = msg.permissions.get(plugin, level)
msg.permissions[plugin] = max(level, current_level)
for pattern, rules in list(self.bot.deny_rules.items()):
regex = '^' + re.escape(pattern).replace('\\*', '.*') + '$'
if not re.match(regex, msg.prefix):
continue
for plugin, level in list(rules.items()):
if plugin == 'ANY':
for plugin, current_level in list(msg.permissions.items()):
msg.permissions[plugin] = min(level, current_level)
continue
current_level = msg.permissions.get(plugin, level)
msg.permissions[plugin] = min(level, current_level)
def process_privmsg(self, msg):
if msg.trigger:
self.call_trigger(msg)
elif msg.channel:
for match in url_re.finditer(msg.param[1]):
if match.group(1):
continue
url = match.group(0)
self.call_url(msg, url)
def call_trigger(self, msg):
authorized = True
num_words = len(msg.trigger.split())
for depth in range(num_words, 0, -1):
parts = tuple(msg.trigger.split(None, depth))
hooks = self.find(TriggerHook(parts[:depth]))
n = len(hooks)
hooks[:] = [h for h in hooks if
h.fn._level <= msg.permissions.get(h.fn.__self__.name, msg.permissions.get('ANY', 0))]
if len(hooks) < n:
authorized = False
if not hooks:
pass
targstr = parts[depth] if len(parts) > depth else ''
targs = (' '.join(parts[:depth]),) + tuple(targstr.split())
if self.call(hooks, msg, targs, targstr):
break
if not authorized:
msg.reply("You don't have permission to use that trigger")
def call_timestamp(self, timestamp):
hooks = self.find(TimestampHook(timestamp))
self.call(hooks, timestamp)
def call_url(self, msg, url):
match = domain_re.match(url)
if not match:
return
domain = match.group(1).lower()
hooks = self.find(UrlHook(domain))
if self.call(hooks, msg, domain, url):
return True
hooks = self.find(UrlHook('any'))
self.call(hooks, msg, domain, url)
```
#### File: pybot/pybot/message.py
```python
import re
from datetime import datetime
from . import config
message_re = re.compile(
'^(?:' +
':(?P<prefix>' +
'(?P<source>[^ !@]+)' +
'(?:' +
'(?:!(?P<user>[^ @]+))?' +
'@(?P<host>[^ ]+)' +
')?' +
') ' +
')?' +
'(?P<cmd>[^ :]+)' +
'(?: (?P<params>.+))?$'
)
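# Example: ':nick!user@host PRIVMSG #chan :hello there' yields prefix='nick!user@host',
# source='nick', user='user', host='host', cmd='PRIVMSG', param=['#chan', 'hello there'].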
def parse_params(params):
l = []
while params:
if params[0] == ':':
l.append(params[1:])
break
if len(l) == 14:
l.append(params)
break
param, _, params = params.partition(' ')
l.append(param)
return l
def parse_message(message):
match = message_re.match(message)
if match:
d = match.groupdict()
d['cmd'] = d['cmd'].upper()
d['param'] = parse_params(d['params'])
del d['params']
else:
d = {'prefix': None, 'source': None, 'user': None, 'host': None,
             'cmd': '', 'param': []}
return d
class Message(object):
def __init__(self, line, bot=None):
self.bot = bot
self.raw = line
self.reply_to = None
self.time = datetime.utcnow()
self.channel = None
self.trigger = None
self.permissions = {}
self.__dict__.update(parse_message(line))
if self.cmd in ('PRIVMSG', 'NOTICE'):
if self.param[0].startswith(('&', '#', '+', '!')):
self.channel = self.param[0].lower()
self.reply_to = self.param[0]
else:
self.reply_to = self.source
if self.cmd == 'PRIVMSG':
self._detect_trigger()
def _detect_trigger(self):
text = self.param[-1]
directed_triggers = config.config[self.bot.network] \
.get('directed_triggers', False)
if directed_triggers:
if self.channel:
if text.lower().startswith(self.bot.nick.lower()):
nicklen = len(self.bot.nick)
if len(text) > nicklen and text[nicklen] in [',', ':']:
self.trigger = text[nicklen + 1:]
else:
self.trigger = text
else:
if text.startswith('!'):
self.trigger = text[1:]
def reply(self, text, direct=False):
if not self.bot:
raise Exception('No bot object bound')
if not self.reply_to and not self.source:
raise Exception('Nobody to reply to')
direct |= not bool(self.reply_to)
recipient = self.source if direct else self.reply_to
self.bot.privmsg(recipient, text)
```
#### File: pybot/plugins/song.py
```python
import os
import re
import sqlite3
from traceback import print_exc
from pybot.plugin import *
KEEP_RATIO = 0.1
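# Fraction of the track library remembered per reply context; recently served
# track ids are kept in self.last_tracks so random picks avoid near-term repeats.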
class Plugin(BasePlugin):
def on_load(self):
self.db = sqlite3.connect(os.path.join(self.bot.core.data_path, 'song.db'))
self.cur = self.db.cursor()
query = '''CREATE TABLE IF NOT EXISTS artist (
id INTEGER PRIMARY KEY,
name TEXT,
UNIQUE(name)
);'''
self.cur.execute(query)
query = '''CREATE TABLE IF NOT EXISTS track (
id INTEGER PRIMARY KEY,
artist_id INTEGER,
name TEXT,
nick TEXT,
youtube TEXT,
UNIQUE(artist_id, name),
FOREIGN KEY(artist_id) REFERENCES artist(id)
);'''
self.cur.execute(query)
query = '''CREATE TRIGGER IF NOT EXISTS delete_unused_artist
AFTER DELETE ON track
BEGIN
DELETE FROM artist WHERE id = OLD.artist_id AND
(SELECT COUNT(*) FROM track WHERE artist_id = OLD.artist_id) = 0;
END;'''
self.cur.execute(query)
self.db.commit()
self.last_tracks = {}
def on_unload(self):
self.db.close()
def add_track(self, artist, title, nick=None):
track_added = False
query = '''SELECT id FROM artist
WHERE name = ? COLLATE NOCASE;'''
self.cur.execute(query, (artist,))
row = self.cur.fetchone()
if row is not None:
artist_id, = row
else:
query = '''INSERT INTO artist (name)
VALUES (?);'''
self.cur.execute(query, (artist,))
artist_id = self.cur.lastrowid
query = '''SELECT id FROM track
WHERE artist_id = ? AND name = ? COLLATE NOCASE;'''
self.cur.execute(query, (artist_id, title))
row = self.cur.fetchone()
if row is not None:
track_id, = row
else:
query = '''INSERT INTO track (artist_id, name, nick)
VALUES (?, ?, ?);'''
self.cur.execute(query, (artist_id, title, nick))
track_id = self.cur.lastrowid
track_added = True
return track_id, track_added
@hook
def song_trigger(self, msg, args, argstr):
context = msg.reply_to
if argstr:
msg.reply('Unknown command, see help.')
return
while True:
query = '''SELECT track.id, artist.name, track.name, track.youtube
FROM track
JOIN artist ON artist_id = artist.id
ORDER BY RANDOM()
LIMIT 1;'''
self.cur.execute(query)
row = self.cur.fetchone()
if row is None:
msg.reply('No songs yet!')
return
track_id, artist, track, youtube = row
if context not in self.last_tracks or track_id not in self.last_tracks[context]:
break
if youtube is not None:
msg.reply('%s - %s - https://youtu.be/%s' % (artist, track, youtube))
else:
msg.reply('%s - %s' % (artist, track))
query = '''SELECT COUNT(*)
FROM track;'''
self.cur.execute(query)
count, = self.cur.fetchone()
keep = int(count * KEEP_RATIO)
if context not in self.last_tracks:
self.last_tracks[context] = []
self.last_tracks[context].append(track_id)
self.last_tracks[context] = self.last_tracks[context][-keep:]
@hook
def song_add_trigger(self, msg, args, argstr):
context = msg.reply_to
try:
artist, title = argstr.strip().split(' - ', 1)
except:
msg.reply('Song must be in "artist - track" format')
return True
track_id, track_added = self.add_track(artist, title, msg.source)
self.db.commit()
query = '''SELECT COUNT(*)
FROM track;'''
self.cur.execute(query)
count, = self.cur.fetchone()
keep = int(count * KEEP_RATIO)
if context not in self.last_tracks:
self.last_tracks[context] = []
self.last_tracks[context].append(track_id)
self.last_tracks[context] = self.last_tracks[context][-keep:]
if track_added:
msg.reply('Song was added.')
else:
msg.reply("That song already exists!")
return True
@level(500)
@hook
def song_delete_trigger(self, msg):
context = msg.reply_to
if not self.last_tracks.get(context):
msg.reply('No last track.')
return True
track_id = self.last_tracks[context][-1]
query = '''DELETE FROM track
WHERE id = ?;'''
self.cur.execute(query, (track_id,))
self.db.commit()
del self.last_tracks[context][-1]
msg.reply('Song deleted')
return True
@hook
def song_fix_artist_trigger(self, msg, args, argstr):
context = msg.reply_to
if self.last_tracks.get(context) is None:
msg.reply('No last track.')
return True
track_id = self.last_tracks[context][-1]
query = '''SELECT artist.id, artist.name
FROM track
JOIN artist ON artist_id = artist.id
WHERE track.id = ?;'''
self.cur.execute(query, (track_id,))
row = self.cur.fetchone()
original_artist_id, original_artist_name = row
if original_artist_name == argstr:
msg.reply('No change.')
return True
query = '''SELECT id
FROM artist
WHERE name = ?
LIMIT 1;'''
self.cur.execute(query, (argstr,))
row = self.cur.fetchone()
if row:
artist_id, = row
query = '''UPDATE track
SET artist_id = ?
WHERE id = ?;'''
self.cur.execute(query, (artist_id, track_id))
query = '''SELECT COUNT(*)
FROM track
WHERE artist_id = ?;'''
self.cur.execute(query, (original_artist_id,))
row = self.cur.fetchone()
count, = row
if count == 0:
query = '''DELETE FROM artist
WHERE id = ?;'''
self.cur.execute(query, (original_artist_id,))
else:
query = '''UPDATE artist
SET name = ?
WHERE id = ?;'''
self.cur.execute(query, (argstr, original_artist_id))
msg.reply('Artist updated.')
return True
@hook
def song_fix_title_trigger(self, msg, args, argstr):
context = msg.reply_to
if not self.last_tracks.get(context):
msg.reply('No last track.')
return True
track_id = self.last_tracks[context][-1]
query = '''SELECT name
FROM track
WHERE id = ?;'''
self.cur.execute(query, (track_id,))
row = self.cur.fetchone()
original_track_name, = row
if original_track_name == argstr:
msg.reply('No change.')
return True
query = '''UPDATE track
SET name = ?
WHERE id = ?;'''
self.cur.execute(query, (argstr, track_id))
self.db.commit()
msg.reply('Title updated.')
return True
@hook
def song_last_trigger(self, msg):
context = msg.reply_to
if not self.last_tracks.get(context):
msg.reply('No last track.')
return True
track_id = self.last_tracks[context][-1]
query = '''SELECT track.id, artist.name, track.name, track.youtube
FROM track
JOIN artist ON artist_id = artist.id
WHERE track.id = ?;'''
self.cur.execute(query, (track_id,))
row = self.cur.fetchone()
track_id, artist, track, youtube = row
if youtube is not None:
msg.reply('%s - %s - https://youtu.be/%s' % (artist, track, youtube))
else:
msg.reply('%s - %s' % (artist, track))
return True
@level(1000)
@hook
def song_load_trigger(self, msg, args, argstr):
try:
count = 0
filepath = os.path.join(self.bot.core.data_path, argstr)
with open(filepath) as f:
for line in f:
line = line.strip()
parts = line.split(' - ', 2)
artist, title, nick = map(str.strip, parts)
                    _, success = self.add_track(artist, title, nick)
                    if success:
count += 1
self.db.commit()
except:
print_exc()
msg.reply('Failed to read file.')
return True
        msg.reply('Loaded %d songs successfully.' % (count,))
return True
@hook
def song_search_trigger(self, msg, args, argstr):
context = msg.reply_to
query = '''SELECT track.id, track.youtube, artist.name || ' - ' || track.name AS song
FROM track
JOIN artist ON artist_id = artist.id
WHERE song LIKE ?
ORDER BY RANDOM()
LIMIT 5;'''
self.cur.execute(query, ('%%%s%%' % (argstr,),))
rows = self.cur.fetchall()
if not rows:
msg.reply('No tracks found.')
return True
for row in rows:
track_id, youtube, song = row
if youtube is not None:
msg.reply('%s - https://youtu.be/%s' % (song, youtube))
else:
msg.reply(song)
query = '''SELECT COUNT(*)
FROM track;'''
self.cur.execute(query)
count, = self.cur.fetchone()
keep = int(count * KEEP_RATIO)
if context not in self.last_tracks:
self.last_tracks[context] = []
self.last_tracks[context].append(track_id)
self.last_tracks[context] = self.last_tracks[context][-keep:]
return True
@hook
def song_stats_trigger(self, msg):
query = '''SELECT COUNT(*)
FROM artist;'''
self.cur.execute(query)
artist_count, = self.cur.fetchone()
query = '''SELECT COUNT(*)
FROM track;'''
self.cur.execute(query)
track_count, = self.cur.fetchone()
msg.reply('There are %d artists with %d tracks.' % (artist_count, track_count))
return True
@hook
def song_who_trigger(self, msg):
context = msg.reply_to
if not self.last_tracks.get(context):
msg.reply('No last track.')
return True
track_id = self.last_tracks[context][-1]
query = '''SELECT nick FROM track
WHERE id = ?;'''
self.cur.execute(query, (track_id,))
nick, = self.cur.fetchone()
if not nick:
nick = 'anonymous'
msg.reply('Added by %s' % (nick,))
return True
@hook(('song youtube', 'song yt'))
def song_youtube_trigger(self, msg, args, argstr):
context = msg.reply_to
if not self.last_tracks.get(context):
msg.reply('No last track.')
return True
pattern = '^(?:https?://)?(?:www\.)?(?:youtu\.be/|youtube\.com(?:/embed/|/v/|/watch\?v=))([\w-]{10,12})(?:&.*)?$'
m = re.match(pattern, argstr.strip())
if not m:
msg.reply('That is not a valid youtube URL!')
return True
track_id = self.last_tracks[context][-1]
youtube_id = m.group(1)
query = '''UPDATE track
SET youtube = ?
WHERE id = ?;'''
self.cur.execute(query, (youtube_id, track_id))
self.db.commit()
msg.reply('Youtube link set!')
return True
@level(900)
@hook(('song youtube delete', 'song yt delete'))
def song_youtube_delete_trigger(self, msg):
context = msg.reply_to
if not self.last_tracks.get(context):
msg.reply('No last track.')
return True
track_id = self.last_tracks[context][-1]
query = '''UPDATE track
SET youtube = NULL
WHERE id = ?;'''
self.cur.execute(query, (track_id,))
self.db.commit()
return True
```
#### File: pybot/plugins/twitter.py
```python
import re
import requests
import tweepy
from html.parser import HTMLParser
from pybot.plugin import *
def tweet_cleaner(text):
hp = HTMLParser()
return hp.unescape(text.replace('\n', ' ').replace('\r', ''))
def url_expander(sentence, msg):
regex_tco = re.compile(r'https?://t.co/.*')
urls = []
words = sentence.split()
for word in words:
m = re.match(regex_tco, word)
if m:
idx = words.index(word)
r = requests.get(word)
if r.status_code in [200, 301, 302]:
msg.reply(r.url)
class Plugin(BasePlugin):
def on_load(self):
auth = tweepy.OAuthHandler(self.config.get('apikey'),
self.config.get('secret'))
auth.set_access_token(self.config.get('auth_token'),
self.config.get('auth_secret'))
self._api = tweepy.API(auth)
@hook('twitter.com')
def twitter_url(self, msg, args, argstr):
regx = re.compile(r'https?://twitter.com/[a-zA-Z0-9_\-]+/status/' \
r'(?P<id>[0-9]+)')
m = re.match(regx, argstr)
if not m:
return
else:
twitter_id = m.group('id')
try:
status = self._api.get_status(twitter_id, tweet_mode='extended')
msg.reply(tweet_cleaner(status.full_text))
url_expander(status.full_text, msg)
except tweepy.TweepError as e:
msg.reply('No Status for that ID.')
return True
@hook
def twitter_user_trigger(self, msg, args, argstr):
try:
user = self._api.get_user(argstr, tweet_mode='extended')
msg.reply(tweet_cleaner(user.status.full_text))
url_expander(user.status.full_text, msg)
except tweepy.TweepError as e:
print(e)
msg.reply('No user by that name.')
@hook
def twitter_help_trigger(self, msg, args, argstr):
msg.reply('Usage: twitter [search|user] <text> Returns most recent ' \
'or specified by URL Tweet text.')
@hook
def twitter_search_trigger(self, msg, args, argstr):
try:
cursor = tweepy.Cursor(self._api.search, q=argstr, rpp=1,
tweet_mode='extended')
for c in cursor.items(1):
uname = c.author.name
msg.reply('@{0}: {1}'.format(uname, tweet_cleaner(c.full_text)))
url_expander(c.full_text, msg)
break
else:
msg.reply('No results.')
except tweepy.TweepError as e:
print(e)
msg.reply('Update failed.')
```
#### File: pybot/pybot/yaml.py
```python
from collections import OrderedDict
from ruamel.yaml import YAML
def _constr_dict(constructor, node):
od = OrderedDict()
for key_node, value_node in node.value:
key = constructor.construct_object(key_node)
value = constructor.construct_object(value_node)
od[key] = value
return od
def _repr_dict(representer, data):
return representer.represent_mapping(
yaml.resolver.DEFAULT_MAPPING_TAG, data)
yaml = YAML()
yaml.constructor.add_constructor(yaml.resolver.DEFAULT_MAPPING_TAG,
_constr_dict)
yaml.representer.add_representer(OrderedDict, _repr_dict)
``` |
{
"source": "jkentwhite/through-a-data-point",
"score": 3
} |
#### File: through-a-data-point/code/controller.py
```python
import sys
import RPi.GPIO as GPIO
import time
import threading
from subprocess import Popen
# CONSTANTS
VIDEO_FILE = "legal_landscape.mp4"
GPIO.setmode(GPIO.BCM)
MOTOR_A_A = 18
MOTOR_A_B = 23
MOTOR_A_C = 24
MOTOR_A_D = 25
MOTOR_B_A = 4
MOTOR_B_B = 17
MOTOR_B_C = 27
MOTOR_B_D = 22
MOTOR_C_A = 12
MOTOR_C_B = 16
MOTOR_C_C = 20
MOTOR_C_D = 21
MOTOR_D_A = 6
MOTOR_D_B = 13
MOTOR_D_C = 19
MOTOR_D_D = 26
class Motor:
def __init__(self, _id, _a, _b, _c, _d):
self.id = _id
self.pins = [_a, _b, _c, _d]
def setStep(self, values):
GPIO.output(self.pins[0], values[0])
GPIO.output(self.pins[1], values[1])
GPIO.output(self.pins[2], values[2])
GPIO.output(self.pins[3], values[3])
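    # up()/down() drive the two coil pairs through a 4-phase full-step sequence;
    # `delay` is the pause between phases and `steps` is the number of full cycles.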
def up(self, delay, steps):
for i in range(0, steps):
self.setStep([1, 0, 1, 0])
time.sleep(delay)
self.setStep([0, 1, 1, 0])
time.sleep(delay)
self.setStep([0, 1, 0, 1])
time.sleep(delay)
self.setStep([1, 0, 0, 1])
time.sleep(delay)
def down(self, delay, steps):
for i in range(0, steps):
self.setStep([1, 0, 0, 1])
time.sleep(delay)
self.setStep([0, 1, 0, 1])
time.sleep(delay)
self.setStep([0, 1, 1, 0])
time.sleep(delay)
self.setStep([1, 0, 1, 0])
time.sleep(delay)
def move(self, dir, delay, steps):
if dir == "up":
t = threading.Thread(target=self.up, args=(delay, steps))
t.start()
else:
t = threading.Thread(target=self.down, args=(delay, steps))
t.start()
def reset(self):
self.setStep([0, 0, 0, 0])
motors = []
motors.append(Motor("A", MOTOR_A_A, MOTOR_A_B, MOTOR_A_C, MOTOR_A_D))
motors.append(Motor("B", MOTOR_B_A, MOTOR_B_B, MOTOR_B_C, MOTOR_B_D))
motors.append(Motor("C", MOTOR_C_A, MOTOR_C_B, MOTOR_C_C, MOTOR_C_D))
motors.append(Motor("D", MOTOR_D_A, MOTOR_D_B, MOTOR_D_C, MOTOR_D_D))
GPIO.setup(MOTOR_A_A, GPIO.OUT)
GPIO.setup(MOTOR_A_B, GPIO.OUT)
GPIO.setup(MOTOR_A_C, GPIO.OUT)
GPIO.setup(MOTOR_A_D, GPIO.OUT)
GPIO.setup(MOTOR_B_A, GPIO.OUT)
GPIO.setup(MOTOR_B_B, GPIO.OUT)
GPIO.setup(MOTOR_B_C, GPIO.OUT)
GPIO.setup(MOTOR_B_D, GPIO.OUT)
GPIO.setup(MOTOR_C_A, GPIO.OUT)
GPIO.setup(MOTOR_C_B, GPIO.OUT)
GPIO.setup(MOTOR_C_C, GPIO.OUT)
GPIO.setup(MOTOR_C_D, GPIO.OUT)
GPIO.setup(MOTOR_D_A, GPIO.OUT)
GPIO.setup(MOTOR_D_B, GPIO.OUT)
GPIO.setup(MOTOR_D_C, GPIO.OUT)
GPIO.setup(MOTOR_D_D, GPIO.OUT)
def main():
try:
for motor in motors:
motor.reset()
Popen(['/usr/bin/omxplayer', VIDEO_FILE])
motors[0].move("up", 0.075, 40)
motors[1].move("down", 0.025, 40)
motors[2].move("down", 0.055, 40)
motors[3].move("up", 0.085, 40)
for t in threading.enumerate():
try:
t.join()
except RuntimeError as err:
                if 'cannot join current thread' in str(err):
continue
else:
raise
GPIO.cleanup()
except KeyboardInterrupt:
print "exiting"
GPIO.cleanup()
sys.exit(0)
main()
```
#### File: through-a-data-point/code/shifter.py
```python
import sys
import RPi.GPIO as GPIO
import time
# NOTES
# each motor has a number 0-6
# each entry in the dictionary for each motor has a value for CLOCK, DATA and LATCH
# TODO need to determine values for going up and going down
GPIO.setmode(GPIO.BCM)
## 1
CLOCK_1 = 18 #green
DATA_1 = 23 #blue
LATCH_1 = 24 #orange
CLOCK_2 = 6 #orange
DATA_2 = 25 #purple
LATCH_2 = 12 #yellow
CLOCK_3 = 26 #white
DATA_3 = 16 #grey
LATCH_3 = 20 #white
forward_first = [
10, # 1010,
6, # 0110,
5, # 0101,
9 # 1001
]
forward_second = [160, 96, 80, 144]
backward_first = [
9, # 1001,
5, # 0101,
6, # 0110,
10 # 1010
]
backward_second = [144, 80, 96, 160]
class Motor:
def __init__(self, _id, _neighbour, _clock, _latch, _data, _position, _direction):
self.id = _id
self.clock = _clock
self.latch = _latch
self.data = _data
self.neighbour = _neighbour
        self.first = _id in ("A", "C", "E")
        if self.first:
self.fwd = [10, 6, 5, 9]
self.bwd = [9, 5, 6, 10]
else:
self.fwd = [160, 96, 80, 144]
self.bwd = [144, 80, 96, 160]
    # The clock and latch pins are pulsed through helper methods whose names do
    # not shadow the pin-number attributes assigned in __init__.
    def pulse_clock(self):
        GPIO.output(self.clock, 1)
        time.sleep(.01)
        GPIO.output(self.clock, 0)
    def pulse_latch(self):
        GPIO.output(self.latch, 1)
        time.sleep(.01)
        GPIO.output(self.latch, 0)
    def writePin(self, value):
        for x in range(0, 8):
            temp = value & 0x80
            if temp == 0x80:
                GPIO.output(self.data, 1)
            else:
                GPIO.output(self.data, 0)
            self.pulse_clock()
            value = value << 0x01
        self.pulse_latch()
    def up(self, delay, steps):
        for i in range(0, steps):
            self.writePin(self.fwd[0])
            time.sleep(delay)
            self.writePin(self.fwd[1])
            time.sleep(delay)
            self.writePin(self.fwd[2])
            time.sleep(delay)
            self.writePin(self.fwd[3])
    def down(self, delay, steps):
        for i in range(0, steps):
            self.writePin(self.bwd[0])
            time.sleep(delay)
            self.writePin(self.bwd[1])
            time.sleep(delay)
            self.writePin(self.bwd[2])
            time.sleep(delay)
            self.writePin(self.bwd[3])
    def reset(self):
        for state in [0, 0, 0, 0]:
            self.writePin(state)
motors = []
# motors = {
# "A": {
# "NEIGHBOUR": "B",
# "CLOCK": CLOCK_1,
# "LATCH": LATCH_1,
# "DATA": DATA_1,
# "DOWN": 3,
# "UP": 12,
# "STILL": 0,
# "CURRENT": "STILL"
# },
# "B": {
# "NEIGHBOUR": "A",
# "CLOCK": CLOCK_1,
# "LATCH": LATCH_1,
# "DATA": DATA_1,
# "DOWN": 48,
# "UP": 192,
# "STILL": 0,
# "CURRENT": "STILL"
# },
# "C": {
# "NEIGHBOUR": "D",
# "CLOCK": CLOCK_2,
# "LATCH": LATCH_2,
# "DATA": DATA_2,
# "DOWN": 3,
# "UP": 12,
# "STILL": 0,
# "CURRENT": "STILL"
# },
# "D": {
# "NEIGHBOUR": "C",
# "CLOCK": CLOCK_2,
# "LATCH": LATCH_2,
# "DATA": DATA_2,
# "DOWN": 48,
# "UP": 192,
# "STILL": 0,
# "CURRENT": "STILL"
# },
# "E": {
# "NEIGHBOUR": "F",
# "CLOCK": CLOCK_3,
# "LATCH": LATCH_3,
# "DATA": DATA_3,
# "DOWN": 3,
# "UP": 12,
# "STILL": 0,
# "CURRENT": "STILL"
# },
# "F": {
# "NEIGHBOUR": "E",
# "CLOCK": CLOCK_3,
# "LATCH": LATCH_3,
# "DATA": DATA_3,
# "DOWN": 48,
# "UP": 192,
# "STILL": 0,
# "CURRENT": "STILL"
# }
# }
GPIO.setup(CLOCK_1, GPIO.OUT)
GPIO.setup(DATA_1, GPIO.OUT)
GPIO.setup(LATCH_1, GPIO.OUT)
GPIO.setup(CLOCK_2, GPIO.OUT)
GPIO.setup(DATA_2, GPIO.OUT)
GPIO.setup(LATCH_2, GPIO.OUT)
GPIO.setup(CLOCK_3, GPIO.OUT)
GPIO.setup(DATA_3, GPIO.OUT)
GPIO.setup(LATCH_3, GPIO.OUT)
GPIO.output(LATCH_1, 0) #we close the latch
GPIO.output(LATCH_2, 0)
GPIO.output(LATCH_3, 0)
GPIO.output(CLOCK_1, 0) #idk/
GPIO.output(CLOCK_2, 0)
GPIO.output(CLOCK_3, 0)
def pulseClock(_id):
GPIO.output(motors[_id]['CLOCK'], 1)
#GPIO.output(CLOCK_1, 1)
time.sleep(.01)
GPIO.output(motors[_id]['CLOCK'], 0)
#GPIO.output(CLOCK_1, 0)
def serLatch(_id):
GPIO.output(motors[_id]['LATCH'], 1)
#GPIO.output(LATCH_1, 1)
time.sleep(.01)
GPIO.output(motors[_id]['LATCH'], 0)
#GPIO.output(LATCH_1, 0)
#most significant bit out first!
def ssrWrite(_id, _dir):
motors[_id]["CURRENT"] = _dir
neighbour = motors[_id]["NEIGHBOUR"]
value = motors[_id][_dir] + motors[neighbour][motors[neighbour]["CURRENT"]]
for x in range(0, 8):
temp = value & 0x80 #we turn the base 10 value into an 8 bit digit
if temp == 0x80:
GPIO.output(motors[_id]['DATA'], 1) #write HIGH
#GPIO.output(DATA_1, 1)
else:
GPIO.output(motors[_id]['DATA'], 0) #write LOW
#GPIO.output(DATA_1, 0)
pulseClock(_id)
value = value << 0x01 #shift left
serLatch(_id)
def toBinary(value):
binaryValue = '0b'
for x in range(0, 8):
temp = value & 0x80
if temp == 0x80:
binaryValue = binaryValue + '1'
else:
binaryValue = binaryValue + '0'
value = value << 1
return binaryValue
def reset():
for key in motors:
ssrWrite(key, "STILL")
def main():
try:
reset()
#for key in motors:
# ssrWrite(key, "DOWN")
GPIO.cleanup()
except KeyboardInterrupt:
print "exiting"
GPIO.cleanup()
sys.exit(0)
main()
``` |
{
"source": "jkereako/algorithms-2",
"score": 4
} |
#### File: algorithms-2/algorithms/binary_tree.py
```python
def min_depth(T):
if T is None:
return 0
# The conditions below cover the only edge case: when the root node has only
# 1 child.
if T.left is None:
return 1 + min_depth(T.right)
if T.right is None:
return 1 + min_depth(T.left)
return 1 + min(min_depth(T.left), min_depth(T.right))
def max_depth(T):
if T is None:
return 0
return 1 + max(max_depth(T.left), max_depth(T.right))
def is_balanced(T):
return max_depth(T) - min_depth(T) <= 1
def is_mirror(p, q):
if p is None or q is None:
return p == q
return p.value == q.value and is_mirror(p.left, q.right) and is_mirror(p.right, q.left)
def lca(T, p, q):
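    # Lowest common ancestor in a general binary tree; assumes both p and q are
    # present and returns the node at which their paths from the root diverge.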
if T is None:
return None
# This is the LCA
if T.value == p or T.value == q:
return T
# Explore subtrees.
left_lca = lca(T.left, p, q)
right_lca = lca(T.right, p, q)
if left_lca and right_lca:
return T
return left_lca if left_lca is not None else right_lca
def lca_bst(T, p, q):
if T is None:
return None
# Explore left subtree
if T.value > p and T.value > q:
return lca_bst(T.left, p, q)
# Explore right subtree
if T.value < p and T.value < q:
return lca_bst(T.right, p, q)
return T
``` |
{
"source": "jkereako/DailyCodingProblems",
"score": 3
} |
#### File: DailyCodingProblems/tests/test_problem_1.py
```python
import src
from src import problem_1
import unittest
import random
class TestProblem1(unittest.TestCase):
def test_empty_list_is_none(self):
result = problem_1.solution([], 42)
self.assertIsNone(result)
def test_single_element_list_is_none(self):
result = problem_1.solution([2], 33)
self.assertIsNone(result)
def test_solution_is_in_simple_list(self):
start = -3
end = 10
target_sum = 7
result = problem_1.solution([end, start], target_sum)
self.assertEqual(result[0], start)
self.assertEqual(result[1], end)
def test_solution_is_in_complex_list(self):
L = [18, 15, 2, 21, 34, -13, 10, 0, -3, 21]
target_sum = 17
result = problem_1.solution(L, target_sum)
self.assertEqual(result[0], 2)
self.assertEqual(result[1], 15)
def test_solution_is_not_in_complex_list(self):
L = [18, 15, 1, 29, 34, -13, 10, 0, -3, 21]
target_sum = 17
result = problem_1.solution(L, target_sum)
self.assertIsNone(result)
```
#### File: DailyCodingProblems/tests/test_problem_2.py
```python
import src
from src import problem_2
import unittest
import random
class TestProblem2(unittest.TestCase):
def test_empty_list_is_none(self):
result = problem_2.solution([])
self.assertIsNone(result)
def test_two_element_list_is_identity(self):
L = [2, 3]
result = problem_2.solution(L)
self.assertListEqual(result, L)
def test_non_zero_list(self):
L = [1, 2, 3, 4, 5]
result = problem_2.solution(L)
solution = [120, 60, 40, 30, 24]
self.assertListEqual(result, solution)
def test_zero_in_list(self):
L = [1, 0, 2, 3, 4, 5]
result = problem_2.solution(L)
self.assertIsNone(result)
``` |
{
"source": "jkereako/flask-api-skeleton",
"score": 3
} |
#### File: app/controllers/user.py
```python
from flask import abort, Blueprint, request, jsonify, g, url_for
from app.utils import *
from app.models.user import User
from app import db, auth
mod = Blueprint("user", __name__, url_prefix="/api")
@mod.route("/users", methods=["GET"])
def all():
return jsonify(
prepare_json_response(
message=None,
success=True,
data=[i.serialize for i in User.query.all()]
)
)
@mod.route("/user", methods=["POST"])
def create():
"""
$ curl -i -X POST -H "Content-Type: application/json" -d '{"username":"user","password":"<PASSWORD>"}' http://localhost:5000/api/user
"""
username = request.json.get("username")
password = request.json.get("password")
if username is None or password is None:
abort(400) # missing arguments
if User.query.filter_by(username=username).first() is not None:
abort(400) # existing user
a_user = User(username=username)
a_user.hash_password(password)
db.session.add(a_user)
db.session.commit()
return jsonify(
prepare_json_response(
message="User created",
success=True,
data={"username": a_user.username}
)
), 201, {"Location": url_for("user.single", id=a_user.id)}
@mod.route("/users/<int:id>", methods=["GET"])
def single(id):
user = User.query.get(id)
if not user:
abort(400)
return jsonify(
prepare_json_response(
message="User found",
success=True,
data={"username": user.username}
)
)
@mod.route("/resource", methods=["GET"])
@auth.login_required
def resource():
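    """
    Example request with a token obtained from /api/token (values are placeholders):
    $ curl -u <token>:unused -i -X GET http://localhost:5000/api/resource
    """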
return jsonify(
prepare_json_response(
message="Hi there, %s! This is a protected resource." % g.user.username,
success=True,
data={"username": g.user.username}
)
)
@mod.route("/token", methods=["GET"])
@auth.login_required
def token():
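    """
    Example request with basic auth (credentials are placeholders):
    $ curl -u <username>:<password> -i -X GET http://localhost:5000/api/token
    """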
token = g.user.generate_auth_token(600)
return jsonify(
prepare_json_response(
message=None,
success=True,
data={"token": token.decode("ascii"), "duration":600}
)
)
@auth.verify_password
def verify_password(username_or_token, password):
# first try to authenticate by token
user = User.verify_auth_token(username_or_token)
if not user:
# try to authenticate with username/password
user = User.query.filter_by(username=username_or_token).first()
if not user or not user.verify_password(password):
return False
g.user = user
return True
```
#### File: app/models/user.py
```python
from app import app, db
from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
from passlib.apps import custom_app_context
class User(db.Model):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(32), index=True)
password_hash = db.Column(db.String(64))
def hash_password(self, password):
self.password_hash = custom_app_context.encrypt(password)
def verify_password(self, password):
return custom_app_context.verify(password, self.password_hash)
def generate_auth_token(self, expiration=600):
s = Serializer(app.config['SECRET_KEY'], expires_in=expiration)
return s.dumps({'id': self.id})
@property
def serialize(self):
"""Return object data in easily serializeable format"""
return {"id":self.id,"username":self.username}
@staticmethod
def verify_auth_token(token):
s = Serializer(app.config["SECRET_KEY"])
try:
data = s.loads(token)
except SignatureExpired:
return None # valid token, but expired
except BadSignature:
return None # invalid token
user = User.query.get(data["id"])
return user
``` |
{
"source": "jkereako/gysc",
"score": 3
} |
#### File: gysc/lib/enum.py
```python
from contract import Contract
class Enum(Contract):
def __init__(self, name, backing_type, cases):
if type(backing_type) is not str:
raise TypeError("Expected \"type\" to be a str object")
if not isinstance(cases, list):
raise TypeError("Expected \"properties\" to be a list")
Contract.__init__(self, name)
self.backing_type = backing_type
self.cases = cases
```
#### File: gysc/lib/generator.py
```python
import os
import time
from swagger import Swagger
from enum import Enum
from struct import Struct
from property import Property
# http://petstore.swagger.io/v2/swagger.json
class Generator(object):
def __init__(self, swagger):
if type(swagger) is not Swagger:
raise TypeError("Expected \"swagger\" to be a Swagger object")
self.swagger = swagger
def generate(self):
pass
def mkdir(self, dir):
if not os.path.exists(dir):
os.mkdir(dir)
``` |
{
"source": "jkerkela/best-ml-stock-predictor",
"score": 3
} |
#### File: best-ml-stock-predictor/data_analyzer/evaluate.py
```python
from IPython import display
import math
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
import tools.data_item_processor as dataItemProcessor
import tools.tensor_object_provider as tensorObjProvider
def evaluate_model(model_regressor,
evaluation_examples,
evaluation_targets):
"""Evaluates model.
  In addition to evaluation, this function also prints evaluation progress
  information and an evaluation loss.
Args:
model_regressor: A trained regressor object (DNN type)
evaluation_examples: A `DataFrame` containing one or more columns from
`stock_dataframe` to use as input features for evaluation.
evaluation_targets: A `DataFrame` containing exactly one column from
`stock_dataframe` to use as target for evaluation.
"""
evaluate_input_fn = lambda: tensorObjProvider.train_input_fn(
evaluation_examples,
evaluation_targets["1_month_future_value"],
num_epochs=1,
shuffle=False)
evaluation_predictions = model_regressor.predict(input_fn=evaluate_input_fn)
evaluation_predictions = np.array([item['predictions'][0] for item in evaluation_predictions])
root_mean_squared_error = math.sqrt(
metrics.mean_squared_error(evaluation_predictions, evaluation_targets))
print("Final RMSE (on test data): %0.2f" % root_mean_squared_error)
data_directory = str("../data_loader/data/")
evaluation_data_file = pd.read_csv(data_directory + "evaluate.csv", sep=",")
evaluation_examples = dataItemProcessor.preprocess_features(evaluation_data_file)
evaluation_targets = dataItemProcessor.preprocess_targets(evaluation_data_file)
print("Evaluate data key indicators:")
display.display(evaluation_examples.describe())
display.display(evaluation_targets.describe())
feature_columns = dataItemProcessor.construct_feature_columns(evaluation_examples)
regressor = tensorObjProvider.get_DNN_regressor(features=feature_columns)
evaluate_model(regressor,
evaluation_examples,
evaluation_targets)
``` |
{
"source": "jkerola/poetcli",
"score": 2
} |
#### File: poetcli/controllers/collectionController.py
```python
from cement import Controller, ex
from ..utils import databaseUtils, controllerUtils
class CollectionController(Controller):
class Meta:
label = 'collection controls'
@ex(
help='set currently active collection by id or name',
arguments=[
(
['-i', '--id'],
{
'help': 'collection id',
'action': 'store',
'dest': 'id'
}
)
]
)
def collection_active(self):
if self.app.pargs.id is not None:
col_id = self.app.pargs.id
databaseUtils.set_active_collection(self, col_id)
self.collection_list()
else:
col_id = databaseUtils.get_active_collection_id(self)
collection = self.app.db.table('collections').get(doc_id=col_id)
data = {'collection': collection}
self.app.render(data, 'select_collection.help.jinja2')
@ex(help='create a new collection')
def collection_new(self):
databaseUtils.create_poem_collection(self)
@ex(help='list all collections')
def collection_list(self):
data = {
'collections': controllerUtils.get_all_collections(self),
'active_collection': databaseUtils.get_active_collection_id(self)
}
self.app.render(data, 'list_collections.jinja2')
@ex(
help='permanently delete collection',
arguments=[
(
['-i', '--id'],
{
'help': 'collection id',
'action': 'store',
'dest': 'id'
}
)
]
)
def collection_delete(self):
if self.app.pargs.id is not None:
delete_id = self.app.pargs.id
databaseUtils.delete_collection(self, delete_id)
self.collection_list()
else:
self.app.render({}, 'delete_collection.help.jinja2')
```
#### File: poetcli/utils/controllerUtils.py
```python
from cement import shell
from cement.utils import fs
from datetime import datetime
from . import databaseUtils
def get_all_collections(self):
"""Return all collections in the database"""
return self.app.db.table('collections').all()
def get_all_poems_in_active_collection(self):
"""Return all poems in active collection"""
collection = get_active_collection(self)
return self.app.db.table(collection['name']).all()
def get_active_collection(self):
active = databaseUtils.get_active_collection_id(self)
return self.app.db.table('collections').get(doc_id=active)
def create_new_poem(self):
"""
Creates a new temp file, launches editor and then returns file contents
"""
try:
with fs.Tmp() as tmp:
with open(tmp.file, 'w+t') as file:
shell.cmd(f'nano {file.name}', capture=False)
content = file.read()
file.close()
if len(content.strip().strip('\n')) > 0:
save_poem_to_active_collection(self, content)
active_id = databaseUtils.get_active_collection_id(self)
active_col = databaseUtils.get_collection_by_id(self, active_id)
self.app.log.info(f'Saved poem into {active_col["name"]}')
else:
self.app.log.warning('Empty file detected, action aborted')
except FileNotFoundError:
self.app.log.error(
'Could not create a temporary file to write in'
)
def get_poem_by_id(self, poem_id):
"""Get a poem from the database by Id"""
try:
col_id = databaseUtils.get_active_collection_id(self)
collection = databaseUtils.get_collection_by_id(self, col_id)
return self.app.db.table(collection['name']).get(doc_id=poem_id)
except IndexError:
self.app.log.error(f'Poem id {poem_id} not found')
def edit_poem(self, poem_id):
"""Opens poem by id in editor"""
try:
with fs.Tmp() as tmp:
with open(tmp.file, 'w+t') as file:
poem = get_poem_by_id(self, poem_id)
previous_content = poem['content']
file.write(previous_content)
                file.flush()  # flush the buffered write so nano opens the current content
shell.cmd(f'nano {file.name}', capture=False)
file.seek(0, 0)
content = file.read()
file.close()
if len(content.strip().strip('\n')) > 0:
if content == previous_content:
self.app.log.warning(
'No changes detected, no actions taken'
)
else:
poem = {
'created': poem['created'],
'content': content
}
active = get_active_collection(self)
self.app.db.table(active['name']).update(
poem, doc_ids=[poem_id]
)
self.app.log.info('Poem updated')
else:
self.app.log.warning(
'Empty file detected, discarding changes'
)
except FileNotFoundError:
self.app.log.error(
'Could not create a temporary file to write in'
)
def delete_poem(self, poem_id):
"""Delete poem permanently from active collection"""
if get_poem_by_id(self, poem_id) is not None:
active = get_active_collection(self)
self.app.db.table(active['name']).remove(doc_ids=[poem_id])
self.app.log.info(f'Deleted poem {poem_id} from {active["name"]}')
else:
self.app.log.error(f'Poem {poem_id} not found')
def save_poem_to_active_collection(self, content):
"""Save poem to active collection"""
try:
databaseUtils.init_on_empty_collection(self)
now = datetime.now()
poem = {
'created': now.strftime("%Y-%m-%d"),
'content': content
}
collection = get_active_collection(self)
self.app.db.table(collection['name']).insert(poem)
except Exception:
self.app.log.error('Could not save poem to database')
```
#### File: poetcli/tests/test_poetcli.py
```python
from pytest import raises
from poetcli.main import PoetCLITest
def test_poetcli():
# test poetcli without any subcommands or arguments
with PoetCLITest() as app:
app.run()
assert app.exit_code == 0
def test_poetcli_debug():
# test that debug mode is functional
argv = ['--debug']
with PoetCLITest(argv=argv) as app:
app.run()
assert app.debug is True
def test_create_poem():
argv = []
with PoetCLITest(argv=argv) as app:
app.run()
output = app.last_rendered
assert output is None
``` |
{
"source": "JKesslerPhD/FIPInjectionLogger",
"score": 2
} |
#### File: JKesslerPhD/FIPInjectionLogger/cron_data.py
```python
import sqlalchemy as sql
import pandas as pd
import statsmodels.formula.api as smf
from plotly.offline import plot
import plotly.graph_objs as go
import plotly.express as px
from plotly import colors
import numpy as np
import json
import os
class Database():
def __init__(self, db_file="db.sqlite3"):
db_file = os.path.dirname(__file__)+"/"+db_file
self.engine = sql.create_engine('sqlite:///%s' % db_file)
self.fip_stats = None
self.weight = None
self.summary = None
self.quality = None
self.email_list = None
def get_email_list(self):
query = """
Select * from (
select
username,
email,
relapse_start,
count(InjectionLog_injectionlog.id) as LogEntries,
cast(julianday(max(InjectionLog_injectionlog.injection_time)) - julianday(min(InjectionLog_injectionlog.injection_time)) as int) as days_from_logs,
case
when relapse_start not NULL then
cast(julianday(date('now')) - julianday(relapse_start) as int)
else
cast(julianday(date('now')) - julianday(min(InjectionLog_injectionlog.injection_time))-extended_treatment as int)
end as days_from_time,
round((julianday(date('now')) - julianday(birthday))/365,0) as cat_age,
(max(InjectionLog_injectionlog.cat_weight)-min(InjectionLog_injectionlog.cat_weight))/min(InjectionLog_injectionlog.cat_weight)/cast(julianday(max(InjectionLog_injectionlog.injection_time)) - julianday(min(InjectionLog_injectionlog.injection_time)) as int)*100 as cat_weight,
InjectionLog_cats.*
from
InjectionLog_cats, InjectionLog_injectionlog, auth_user
left join InjectionLog_relapsedate on InjectionLog_cats.id = InjectionLog_relapsedate.cat_name_id
where
injectionlog_cats.owner_id not in (Select user_id from InjectionLog_userextension where test_account =1) and
InjectionLog_cats.id = InjectionLog_injectionlog.cat_name_id and
InjectionLog_injectionlog.active=1 and
injectionLog_cats.owner_id = auth_user.id
group by injectionLog_cats.id ) where LogEntries>2 and days_from_time > 168 and (cured = 0 and bad=0)
"""
        if self.email_list is None:
            self.email_list = pd.read_sql(query, self.engine)
        return self.email_list
def get_cat_quality(self):
query = """
Select
*,
(cast(days/7 as int)+1) as week,
case
when wt_units = "lb" then round(dose/(cat_weight/2.2),0)
when wt_units = "kg" then round(dose/cat_weight,0)
end as dosage
from
(
Select
gs_brand,
round(julianday(injection_time) - julianday(treatment_start),0) as days,
concentration * injection_amount as dose,
cat_behavior_today,
wt_units,
cat_weight
from
InjectionLog_injectionlog, InjectionLog_cats, injectionLog_gsbrand
where
InjectionLog_injectionlog.cat_name_id=InjectionLog_cats.id and
InjectionLog_gsbrand.brand = InjectionLog_injectionlog.gs_brand
and InjectionLog_cats.owner_id not in (Select user_id from InjectionLog_userextension where test_account = 1)
) as foo,
(
Select
gs_brand as brand,
count(*) as total_observations,
count(distinct(cat_name_id)) as logged_cats
from
InjectionLog_injectionlog, InjectionLog_cats
where
InjectionLog_cats.owner_id not in (Select user_id from InjectionLog_userextension where test_account = 1) and
InjectionLog_injectionlog.cat_name_id = InjectionLog_cats.id group by gs_brand
) as log
where
log.brand = foo.gs_brand
"""
        if self.quality is None:
            self.quality = pd.read_sql(query, self.engine)
        return self.quality
def get_weight(self):
query = """
Select
cat_weight,
days,
pct_change,
injection_amount,
gs_brand,
(case when admin_method = "Oral" then
cast(price as float)/(concentration)
else
cast(price as float)/(concentration*5)
end) as price,
round(case
when wt_units ="kg" then starting_wt*2.2
else starting_wt
end,1) as start_wt,
pct_change/days*100 as ratio,
round((julianday(date('now')) - julianday(birthday))/365,0) as cat_age
from
(Select
old.cat_weight as starting_wt,
round((InjectionLog_injectionlog.cat_weight-old.cat_weight)/old.cat_weight*100,0)/100 as pct_change,
round(julianday(InjectionLog_injectionlog.injection_time)-julianday(old.injection_time),0) as days,
coalesce(InjectionLog_usergs.price,InjectionLog_gsbrand.price) as price,
coalesce(InjectionLog_usergs.concentration,InjectionLog_gsbrand.concentration) as concentration,
*
from
InjectionLog_injectionlog,
InjectionLog_cats,
(select
min(injection_time) as injection_time,
min(cat_weight) as cat_weight,
cat_name_id
from
InjectionLog_injectionlog
group by cat_name_id) as old
left join InjectionLog_gsbrand on InjectionLog_gsbrand.brand=gs_brand
left join InjectionLog_usergs on InjectionLog_usergs.brand=gs_brand
where
InjectionLog_cats.owner_id not in (Select user_id from InjectionLog_userextension where test_account =1) and
old.cat_name_id = InjectionLog_injectionlog.cat_name_id and
InjectionLog_cats.id = InjectionLog_injectionlog.cat_name_id )
where days>0 and ratio <5 and cat_age <10
"""
        if self.weight is None:
            self.weight = pd.read_sql(query, self.engine)
        return self.weight
def get_summary(self):
query = """
Select * from (
select
username,
relapse_start,
count(InjectionLog_injectionlog.id) as LogEntries,
cast(julianday(max(InjectionLog_injectionlog.injection_time)) - julianday(min(InjectionLog_injectionlog.injection_time)) as int) as days_from_logs,
case
when relapse_start not NULL then
cast(julianday(date('now')) - julianday(relapse_start) as int)
else
cast(julianday(date('now')) - julianday(min(InjectionLog_injectionlog.injection_time))-extended_treatment as int)
end as days_from_time,
round((julianday(date('now')) - julianday(birthday))/365,0) as cat_age,
(max(InjectionLog_injectionlog.cat_weight)-min(InjectionLog_injectionlog.cat_weight))/min(InjectionLog_injectionlog.cat_weight)/cast(julianday(max(InjectionLog_injectionlog.injection_time)) - julianday(min(InjectionLog_injectionlog.injection_time)) as int)*100 as cat_weight,
InjectionLog_cats.*
from
InjectionLog_cats, InjectionLog_injectionlog, auth_user
left join InjectionLog_relapsedate on InjectionLog_cats.id = InjectionLog_relapsedate.cat_name_id
where
injectionlog_cats.owner_id not in (Select user_id from InjectionLog_userextension where test_account =1) and
InjectionLog_cats.id = InjectionLog_injectionlog.cat_name_id and
InjectionLog_injectionlog.active=1 and
injectionLog_cats.owner_id = auth_user.id
group by injectionLog_cats.id ) where LogEntries>2
"""
        if self.summary is None:
            self.summary = pd.read_sql(query, self.engine)
        return self.summary
def get_relapse_stats(self):
query = """
Select
round(julianday('now') - julianday(treatment_start),0) as days_since_start,
round(julianday(relapse_start)-julianday(treatment_start),0) as days_before_relapse,
case
when round(julianday(relapse_start)-julianday(treatment_start),0) > 84 THEN
round(julianday(relapse_start)-julianday(treatment_start)-84,0)
ELSE
0
end as days_into_observation,
round(julianday('now') - julianday(relapse_start),0) as days_since_relapse,
* from InjectionLog_relapsedate, InjectionLog_cats where
InjectionLog_relapsedate.cat_name_id == InjectionLog_cats.id and
InjectionLog_cats.owner_id not in (Select user_id from InjectionLog_userextension where test_account = 1) group by cat_name_id order by relapse_start desc
"""
def get_fip_stats(self):
query = """
Select
fip_type as FIPType,
sum(case
when neuro=0 and ocular =0 then 1
else 0
end) as traditional,
sum(case
when neuro=0 and ocular=1 then 1
else 0 end) as ocular,
sum(case
when neuro=1 and ocular=0 then 1
else 0 end) as neuro,
sum(case when neuro=1 and ocular=1 then 1
else 0 end) as neuro_and_ocular
from
InjectionLog_cats
where
injectionlog_cats.owner_id not in (Select user_id from InjectionLog_userextension where test_account =1)
group by fip_type
"""
        if self.fip_stats is None:
            self.fip_stats = pd.read_sql(query, self.engine)
        return self.fip_stats
class Analysis():
def __init__(self):
pass
def save_file(self, json_dict, filename):
with open(filename,'w') as json_file:
json.dump(json_dict, json_file)
def generate_fip_summary(self, fip_summary):
total_cats = fip_summary["traditional"]+fip_summary["ocular"]+fip_summary["neuro"]+fip_summary["neuro_and_ocular"]
fip_summary = pd.melt(fip_summary,id_vars="FIPType", var_name="Symptoms")
fip_fig = px.bar(fip_summary, x="FIPType", y="value", color="Symptoms", title="Frequency of Symptoms by FIP Type")
fip_fig.update_layout(
yaxis_title="Number of cats with symptoms")
fip_div = plot(fip_fig, output_type='div', include_plotlyjs="cdn")
return {"total_cats":total_cats.to_dict(),"graph":str(fip_div)}
def generate_summary_stats(self,summary):
cat_age = summary.groupby("cat_age").count()
cat_age["age"] = cat_age.index
cat_age["counts"]=cat_age["name"]
age_fig = px.bar(cat_age,x="age", y="counts", title="Cats with FIP by age")
age_div = plot(age_fig,output_type='div', include_plotlyjs="cdn")
return {"graph":age_div}
def generate_weight_figure(self,weight):
fig = go.Figure()
fig = px.scatter(weight, x=weight["days"], y=weight["pct_change"]*100,
opacity=0.8, color=weight["start_wt"]
)
fig.update_layout(
title="Percent change from cat's starting weight (lb) for all logged data",
xaxis_title="Days of Treatment",
yaxis_title="Pct Change from Cat's Starting Weight")
wt_div = plot(fig, output_type='div', include_plotlyjs="cdn")
weight["cat_age"] = weight["cat_age"].astype('float')
model = smf.ols(formula="pct_change ~ 0 + start_wt+days + cat_age", data=weight).fit()
mult = model.params["days"]
try:
ints = model.params["Intercept"]
except:
ints = 0
daily_price = weight["price"].mean()
stwt = model.params["start_wt"]
age = model.params["cat_age"]
model_dict = {"ints":ints,"stwt":stwt,"mult":mult, "age":age, "daily_price":daily_price}
return {"graph":wt_div,"model":model_dict}
def generate_cat_treatment_dist(self, summary):
duration_df = summary[summary["days_from_time"]<=84].groupby("days_from_time").count()
cats84 = summary[summary["days_from_time"]>84].count()[0]
cured_cats = summary[summary["cured"]>0].count()[0]
duration_df["Days since starting"]=duration_df.index
duration_df["counts"]=duration_df["name"]
duration_fig = px.bar(duration_df, x="Days since starting", y="counts", title="# of cats with data being logged relative to how many days ago treatment was started")
duration_fig.update_layout(
xaxis_title="Days since starting treatment",
yaxis_title="# of cats with data on this site")
duration_div=plot(duration_fig,output_type="div", include_plotlyjs="cdn")
return {"graph":duration_div, "total_cats":str(cats84),"cured_cats":str(cured_cats)}
def generate_quality_stats(self, quality, cutoff=5):
ag_stats = quality.groupby(['gs_brand','week']).agg(
behavior = pd.NamedAgg(column='cat_behavior_today', aggfunc=np.mean),
stdev = pd.NamedAgg(column='cat_behavior_today',aggfunc=np.std),
be_max = pd.NamedAgg(column='cat_behavior_today',aggfunc=np.max),
be_min = pd.NamedAgg(column='cat_behavior_today',aggfunc=np.min),
records=pd.NamedAgg(column='cat_behavior_today', aggfunc='count')
)
ag_week = quality.groupby(['week']).agg(
behavior = pd.NamedAgg(column='cat_behavior_today', aggfunc=np.mean),
stdev = pd.NamedAgg(column='cat_behavior_today',aggfunc=np.std),
be_max = pd.NamedAgg(column='cat_behavior_today',aggfunc=np.max),
be_min = pd.NamedAgg(column='cat_behavior_today',aggfunc=np.min),
records=pd.NamedAgg(column='cat_behavior_today', aggfunc='count')
)
ag_stats["sem"] = ag_stats["stdev"]/np.sqrt( ag_stats["records"])
results = ag_stats[ag_stats["records"]>= cutoff*7]
results["upper"] = results["behavior"] + results["sem"]
results["lower"] = results["behavior"] - results["sem"]
results = results.reset_index()
ag_week.reset_index(inplace=True)
sem = ag_week["stdev"]
x = ag_week["week"].tolist()
y = ag_week["behavior"].to_list()
upper = y+sem.fillna(0)
lower = y-sem.fillna(0)
upper = upper.tolist()
lower = lower.tolist()
fig = px.scatter(x=results["week"],
y=results["behavior"],
color=results["gs_brand"],
error_y=results["sem"]*1.96)
fig.add_scatter(
x=x+x[::-1], # x, then x reversed
y=upper+lower[::-1], # upper, then lower reversed
fill='toself',
fillcolor='rgba(0,0,0,0.2)',
line=dict(color='rgba(255,255,255,0)'),
hoverinfo="skip",
showlegend=True,
name="Expected Range")
fig.add_scatter(
x=x,
y=y,
showlegend=False,
line=dict(color='rgb(128,128,128,.25)'),
hoverinfo='skip',
mode='lines'
)
fig.update_layout(yaxis_title="How the cat is doing (1-5 point scale)", xaxis_title="Week Number",
title="How cats improve on GS over time",
xaxis=dict(range=[0.8,max(results["week"])+.2]),
yaxis=dict(range=[1,5.5]),
)
quality_plot = plot(fig, output_type="div", include_plotlyjs="cdn")
return {"graph":quality_plot}
def generate_brand_stats(self, quality):
ag_stats = quality.groupby(['gs_brand','week']).agg(
behavior = pd.NamedAgg(column='cat_behavior_today', aggfunc=np.mean),
stdev = pd.NamedAgg(column='cat_behavior_today',aggfunc=np.std),
be_max = pd.NamedAgg(column='cat_behavior_today',aggfunc=np.max),
be_min = pd.NamedAgg(column='cat_behavior_today',aggfunc=np.min),
records=pd.NamedAgg(column='cat_behavior_today', aggfunc='count')
)
results = ag_stats.reset_index()
fig = px.bar(results, x="week", y="records", color="gs_brand",
title="# of days of treatment logged per brand")
fig.update_layout(yaxis_title="Days logged per week", xaxis_title="Week Number")
brand_stats = plot(fig, output_type="div", include_plotlyjs="cdn")
return {"graph":brand_stats}
def generate_treatment_stats(self, summary):
being_treated = summary[summary["days_from_time"]<=84].count()[0]
observation_cats = summary[(summary["days_from_time"]>84) & (summary["days_from_time"]<168)].count()[0]
post_treatment = summary[summary["days_from_time"]>84].count()[0]
cured_cats = summary[summary["cured"]>0].count()[0]
bad_outcome = summary[summary["bad"]>0].count()[0]
relapse = summary[~summary["relapse_start"].isnull()]
relapse_treatment = relapse[relapse["days_from_time"]<=84].count()[0]
relapse_rate = relapse.count()[0]/post_treatment*100
unknown_status = post_treatment - observation_cats - bad_outcome - cured_cats
results = {"being_treated":int(being_treated), "obseration_cats":int(observation_cats), "post_treatment":int(post_treatment),
"cured_cats":int(cured_cats), "relapse_cats":int(relapse_treatment),
"total_relapse":int(relapse.count(0)[0]), "relapse_rate":float(relapse_rate),
"unk":int(unknown_status), "bad_outcome":int(bad_outcome)}
return results
def generate_graphs(output_file="data_output.txt"):
data = Database()
an = Analysis()
weight = data.get_weight()
summary = data.get_summary()
fip_summary = data.get_fip_stats()
quality = data.get_cat_quality()
graph_summary = an.generate_summary_stats(summary)
graph_distribution = an.generate_cat_treatment_dist(summary)
graph_fip_stats = an.generate_fip_summary(fip_summary)
graph_wt_change = an.generate_weight_figure(weight)
graph_quality = an.generate_quality_stats(quality)
brand_stats = an.generate_brand_stats(quality)
treatment_stats = an.generate_treatment_stats(summary)
combined_dict = {
"summary":graph_summary,
"distribution":graph_distribution,
"fip_stats":graph_fip_stats,
"weight":graph_wt_change,
"quality":graph_quality,
"brands":brand_stats,
"treatment_stats":treatment_stats
}
an.save_file(combined_dict,output_file)
def read_data(filename):
with open(filename) as json_file:
data = json.load(json_file)
return data
if __name__=="__main__":
directory=os.path.dirname(__file__)
generate_graphs(directory+"/data_output.txt")
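# The file written above can be consumed elsewhere via read_data, for example:
#   graphs = read_data(directory + "/data_output.txt")
#   graphs["weight"]["model"]     # fitted weight-change model coefficients
#   graphs["fip_stats"]["graph"]  # ready-to-embed Plotly <div> markup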
``` |
{
"source": "jkeung/Movies_Analysis",
"score": 3
} |
#### File: jkeung/Movies_Analysis/util.py
```python
from sys import argv
import datetime as dt
import requests
from bs4 import BeautifulSoup
import dateutil.parser as parse
import urlparse
import os
import pickle
import re
import time
OUTPUTDIR = 'output'
FILENAMETEMPLATE = 'movie_dictionary'
def create_dir(directory):
#Check to see if directory exists
if not os.path.exists(directory):
os.makedirs(directory)
def get_soup_from_url(url):
'''
Takes url and returns bs4.BeautifulSoup class. If the soup already exists
locally, it will load from file, otherwise it will pull from a url. It will
also create a directory structure (in the current working directory) based on the url.
Ex: 'http://www.boxofficemojo.com/movies/?id=ateam.htm'
Creates: /data/www.boxofficemojo.com/movies/ateam.htm
Args:
url (string)
Returns:
soup (bs4.BeautifulSoup):
'''
parsed_url = urlparse.urlparse(url)
path = []
for item in parsed_url:
for x in item.split():
path.append(x.replace('/', ''))
outfile = path[-1]
outfile = outfile[outfile.find('=') + 1:]
outpath = 'data/' + '/'.join(path[1:-1])
out_pathfile = os.path.join(outpath, outfile)
#Check to see if directory exists
create_dir(outpath)
#Check to see if file exists
try:
soup = pickle.load(open('{}'.format(out_pathfile), 'rb'))
except:
#If doesn't exist, try to get soup from page
try:
soup = BeautifulSoup(requests.get(url).text, 'lxml')
pickle.dump(soup, open('{}'.format(out_pathfile), 'wb'))
except:
pass
return soup
def get_title(soup):
'''
Function to get movie title from the Box Office Mojo individual movie site's soup.
Args:
soup (BeautifulSoup): The beautiful soup that is obtained from requests.get(url)
Returns:
title (string): A string of the title of the movie
'''
try:
index = soup.title.text.find(' - Box Office Mojo')
title = str(soup.title.text[:index])
return title
except:
return 'N/A'
def get_domestic_gross(soup):
'''
Function to get total domestic gross from the Box Office Mojo individual movie site's soup.
Args:
soup (BeautifulSoup): The beautiful soup that is obtained from requests.get(url)
Returns:
number (float): A float of the domestic gross of the movie
'''
try:
number = soup.find_all(text='Domestic:')[0].parent.parent.parent.td.next_sibling.next_sibling.b.text
number = float(number.replace(',','').replace('$',''))
return number
except:
return 'N/A'
def get_distributor(soup):
'''
Function to get total domestic gross from the Box Office Mojo individual movie site's soup.
Args:
soup (BeautifulSoup): The beautiful soup that is obtained from requests.get(url)
Returns:
word (string): The distributor of the movie
'''
try:
word = soup.find(text = 'Distributor: ').next_sibling.text
return str(word)
except:
return 'N/A'
def get_genre(soup):
'''
Function to get genre from the Box Office Mojo individual movie site's soup.
Args:
soup (BeautifulSoup): The beautiful soup that is obtained from requests.get(url)
Returns:
genre (string): The genre of the movie
'''
try:
genre = str(soup.find(text = 'Genre: ').next_sibling.text)
return genre
except:
return 'N/A'
def get_runtime(soup):
'''
Function to get total movie time in minutes from the Box Office Mojo individual movie site's soup.
Args:
soup (BeautifulSoup): The beautiful soup that is obtained from requests.get(url)
Returns:
runtime (int): The runtime of the movie
'''
try:
time = soup.find(text = 'Runtime: ').next_sibling.text
if time.find('hrs.') > 0 and time.find('min.') > 0:
hours = int(time[:time.find('hrs.')])
minutes = int(time[time.find('min.') - 3:time.find('min.')])
runtime = (hours*60) + minutes
return runtime
else:
return None
except:
return 'N/A'
def get_rating(soup):
'''
Function to get MPAA rating from the Box Office Mojo individual movie site's soup.
Args:
soup (BeautifulSoup): The beautiful soup that is obtained from requests.get(url)
Returns:
rating (string): The rating of the movie
'''
try:
rating = str(soup.find(text = 'MPAA Rating: ').next_sibling.text)
return rating
except:
return 'N/A'
def get_budget(soup):
'''
Function to get production budget (in millions)from the Box Office Mojo individual
movie site's soup.
Args:
soup (BeautifulSoup): The beautiful soup that is obtained from requests.get(url)
Returns:
budget (string): The production budget of the movie
'''
try:
budget = str(soup.find(text = 'Production Budget: ').next_sibling.text)
budget = int(budget[1:budget.find('million')])
return budget
except:
return 'N/A'
def get_release_date(soup):
'''
Function to get release date from the Box Office Mojo individual movie site's soup.
Args:
soup (BeautifulSoup): The beautiful soup that is obtained from requests.get(url)
Returns:
date (date): The release date of the movie
'''
try:
date = soup.find(text = 'Release Date: ').next_sibling.text
date = parse.parse(date, fuzzy = True).date()
return date
except:
return 'N/A'
def get_directors(soup):
'''
Function to get directors from the Box Office Mojo individual movie site's soup.
Args:
soup (BeautifulSoup): The beautiful soup that is obtained from requests.get(url)
Returns:
list: A list of director(s) of the movie
'''
fieldlist = []
try:
for x in soup.find_all(text='Director:')[0].parent.parent.parent.nextSibling.font:
word = str(x.string)
if word[0].isupper() and word != 'None':
fieldlist.append(word)
return fieldlist
except:
return 'N/A'
def get_writers(soup):
'''
Function to get writers from the Box Office Mojo individual movie site's soup.
Args:
soup (BeautifulSoup): The beautiful soup that is obtained from requests.get(url)
Returns:
list: A list of writer(s) of the movie
'''
fieldlist = []
try:
for x in soup.find_all(text='Writers:')[0].parent.parent.parent.nextSibling.font:
word = str(x.string)
if word[0].isupper() and word != 'None':
fieldlist.append(word)
return fieldlist
except:
return 'N/A'
def get_actors(soup):
'''
Function to get actors from the Box Office Mojo individual movie site's soup.
Args:
soup (BeautifulSoup): The beautiful soup that is obtained from requests.get(url)
Returns:
list: A list of actor(s) in the movie
'''
fieldlist = []
try:
for x in soup.find_all(text='Actors:')[0].parent.parent.parent.nextSibling.font:
word = str(x.string)
if word[0].isupper() and word != 'None':
fieldlist.append(word)
return fieldlist
except:
return 'N/A'
def get_producers(soup):
'''
Function to get producers from the Box Office Mojo individual movie site's soup.
Args:
soup (BeautifulSoup): The beautiful soup that is obtained from requests.get(url)
Returns:
list: A list of producer(s) of the movie
'''
fieldlist = []
try:
for x in soup.find_all(text='Producers:')[0].parent.parent.parent.nextSibling.font:
word = str(x.string)
if word[0].isupper() and word != 'None':
fieldlist.append(word)
return fieldlist
except:
return 'N/A'
def get_composers(soup):
'''
Function to get composers from the Box Office Mojo individual movie site's soup.
Args:
soup (BeautifulSoup): The beautiful soup that is obtained from requests.get(url)
Returns:
list: A list of writer(s) of the movie
'''
fieldlist = []
try:
        for x in soup.find_all(text='Composers:')[0].parent.parent.parent.nextSibling.font:
word = str(x.string)
if word[0].isupper() and word != 'None':
fieldlist.append(word)
return fieldlist
except:
return 'N/A'
def get_movie_dictionary(url, d = {}):
'''
Function to create or append to a dictionary from the Box Office Mojo individual movie page.
Args:
url (string): The url of the Box Office Mojo individual movie page
Ex: 'http://www.boxofficemojo.com/movies/?id=ateam.htm'
Returns:
d (dictionary): A dictionary of the movie
'''
#Get soup from url locally or from Box Office Mojo directly
soup = get_soup_from_url(url)
#Get movie information
title = get_title(soup)
genre = get_genre(soup)
gross = get_domestic_gross(soup)
release_date = get_release_date(soup)
runtime = get_runtime(soup)
budget = get_budget(soup)
rating = get_rating(soup)
distributor = get_distributor(soup)
directors = get_directors(soup)
actors = get_actors(soup)
writers = get_writers(soup)
producers = get_producers(soup)
#Update dictionary
d.setdefault(title, {}).update({'genre':genre})
d.setdefault(title, {}).update({'gross':gross})
d.setdefault(title, {}).update({'date':release_date})
d.setdefault(title, {}).update({'runtime':runtime})
d.setdefault(title, {}).update({'budget':budget})
d.setdefault(title, {}).update({'rating':rating})
d.setdefault(title, {}).update({'distributor':distributor})
d.setdefault(title, {}).update({'directors':directors})
d.setdefault(title, {}).update({'actors':actors})
d.setdefault(title, {}).update({'writers':writers})
d.setdefault(title, {}).update({'producers':producers})
return d
def get_movie_url_list(letter):
'''
Function to get a list of urls of all movies that begin with a particular letter from the
Box Office Mojo individual movie page.
Args:
letter (string): The letter to pull for movies on the Box Office Mojo A-Z listing page.
Ex: 'http://www.boxofficemojo.com/movies/alphabetical.htm?letter={letter}&p=.htm'
Returns:
url_list2 (list): A list of urls for movies that start with letter.
'''
#Create a dictionary for the number of sublinks per letter
link_dict = {'#':1, 'A':10, 'B':8, 'C':7, 'D':6, 'E':7, 'F':6, 'G':7, 'H':6, 'I':6, \
'J':4, 'K':4, 'L':5, 'M':6, 'N':5, 'O':5, 'P':7, 'Q':1, 'R':6, 'S':13, 'T':8, \
'U':2, 'V':3, 'W':6, 'X':1, 'Y':2, 'Z':2}
base = 'http://www.boxofficemojo.com/'
url_list1 = ['http://www.boxofficemojo.com/movies/alphabetical.htm?letter={0}&page={1}&p=.htm'.format(letter, num) \
for num in range(1, link_dict[letter] + 1) ]
url_list2 = []
for url_1 in url_list1:
soupin = BeautifulSoup(requests.get(url_1).text, 'lxml')
movie_list_table = soupin.find(text = 'ALPHABETICAL INDEX').parent.parent.parent.parent.parent.parent
movie_list = movie_list_table.find_all('a', href=re.compile('^/movies/\?id='))
for movie in movie_list:
url_list2.append(base + movie['href'])
return url_list2
def update_movie_dictionary(file, url_list):
'''
Function to create/load a dictionary if exists and update with movie information.
Args:
letter (string): The letter to pull for movies on the Box Office Mojo A-Z listing page.
url_list (list): A list of urls for individual moviepages on the Box Office Mojo
Returns:
url_list2 (list): A list of urls for movies that start with letter.
'''
try:
d = pickle.load(open('{}'.format(file), 'rb'))
except:
d = {}
for url in url_list:
d.update(get_movie_dictionary(url, d))
pickle.dump(d, open('{}'.format(file), 'wb'))
# def main():
# script, letter = argv
# start_time = time.time()
# create_dir(OUTPUTDIR)
# url_list = get_movie_url_list(letter)
# output_path = os.path.join(OUTPUTDIR, '{0}_{1}.p'.format(FILENAMETEMPLATE, letter))
# update_movie_dictionary(output_path, url_list)
# print ("--- %s seconds ---\n") % (time.time() - start_time)
# print ("Dictionary created successfully!")
# if __name__ == '__main__':
# main()
``` |
{
"source": "jkeung/mta_project",
"score": 3
} |
#### File: mta_project/clean_data/clean_util.py
```python
from datetime import datetime, timedelta
import pandas as pd
import time
OUTFILE = 'MTA_DATA.p'
def get_data():
"""Downloads data from online MTA data source
    Loops through all recent data, and appends it to a single DataFrame
Ex. http://web.mta.info/developers/data/nyct/turnstile/turnstile_150919.txt
Args:
None
Returns:
        df (pandas.DataFrame): The combined turnstile data from all retrieved weeks
"""
end_date = datetime.strptime(time.strftime("%y%m%d"), '%y%m%d')
begin_date = datetime.strptime('141025', '%y%m%d')
base_link = 'http://web.mta.info/developers/data/nyct/turnstile/turnstile_'
while(begin_date < end_date):
link = '{0}{1}.txt'.format(base_link, begin_date.strftime("%y%m%d"))
print 'Retrieving data from {0}...'.format(link)
try:
new_df = pd.read_csv(link)
df = df.append(new_df, ignore_index=True)
except:
df = pd.read_csv(link)
begin_date = begin_date + timedelta(days=7)
return df
def add_clean_columns(df):
"""Cleans the dataframe
    Adds: 'DAY', 'DAY_NUM', 'MONTH', 'TRAFFIC', 'TIME_BIN'
Removes: 'ENTRIES', 'EXITS'
Filters: Keeps entries and exits only between 0 and 5000
ORDER MATTERS FOR CLEANING
Args:
df (pandas.DataFrame): The uncleaned pandas dataframe
Returns:
df (pandas.DataFrame): The cleaned pandas dataframe
"""
df = df.rename(columns={'EXITS ': 'EXITS'})
df = add_day_month(df)
df = add_entry_exit_totals(df)
df = add_traffic_column(df)
df = add_time_bin_column(df)
return df
def add_day_month(df):
"""Creates columns for the Day, Day int value, and the Month
Args:
df (pandas.DataFrame): The original pandas dataframe
Returns:
df (pandas.DataFrame): The pandas dataframe with the DAY, DAY_NUM, and MONTH columns
"""
df['DAY'] = df['DATE'].apply(
lambda x: datetime.strptime(x, '%m/%d/%Y').strftime('%a'))
df['DAY_NUM'] = df['DATE'].apply(
lambda x: datetime.strptime(x, '%m/%d/%Y').strftime('%w'))
df['MONTH'] = df['DATE'].apply(
lambda x: datetime.strptime(x, '%m/%d/%Y').strftime('%m'))
return df
def add_entry_exit_totals(df):
"""Creates two columns containing both the sum of ENTRIES and EXITS
Args:
df (pandas.DataFrame): The original pandas dataframe
Returns:
df (pandas.DataFrame): The pandas dataframe with the TIMEFRAME_ENTRIES and TIMEFRAME_EXITS columns
and drops the ENTRIES and EXITS columns
"""
entries = df['ENTRIES'] - \
df.groupby(['C/A', 'UNIT', 'SCP', 'STATION'])['ENTRIES'].shift(1)
exit = df['EXITS'] - \
df.groupby(['C/A', 'UNIT', 'SCP', 'STATION'])['EXITS'].shift(1)
df['TIMEFRAME_ENTRIES'] = entries
df['TIMEFRAME_EXITS'] = exit
df = df.drop('ENTRIES', 1)
df = df.drop('EXITS', 1)
    df = df.dropna()
return df
def add_traffic_column(df):
"""Add a TRAFFIC column that is the sum of the Entries and Exits for a station
Args:
df (pandas.DataFrame): The original pandas dataframe
Returns:
df (pandas.DataFrame): The pandas dataframe with the TRAFFIC column and TIMEFRAME_ENTRIES
and TIMEFRAME_EXITS columns removed
"""
df = df[(df['TIMEFRAME_ENTRIES'] >= 0) &
(df['TIMEFRAME_ENTRIES'] <= 5000)]
df = df[(df['TIMEFRAME_EXITS'] >= 0) &
(df['TIMEFRAME_EXITS'] <= 5000)]
df['TRAFFIC'] = df['TIMEFRAME_ENTRIES'] + df['TIMEFRAME_EXITS']
df = df.drop('TIMEFRAME_ENTRIES', 1)
df = df.drop('TIMEFRAME_EXITS', 1)
return df
def add_time_bin_column(df):
"""
Takes a dataframe and creates a column with the times binned by every 4 hours
Args:
df (pandas.DataFrame): The original pandas dataframe
Returns:
df (pandas.DataFrame): The pandas dataframe with the TIME_BIN column
"""
df["TIME_INT"] = df["TIME"].map(lambda x: int(x.replace(":", "")))
df["TIME_BIN"] = df["TIME_INT"].map(lambda x: get_range(x))
df = df.drop("TIME_INT", 1)
return df
def get_range(time):
"""An function used to get the correct 4 hour interval for the TIME_BIN column
Takes a dataframe and creates a column with the times binned by every 4 hours
Args:
time (int): A time int representation in the format hhmmss
Ex: noon would be represented as 120000
Returns:
output (float): The 4 hour time interval that the integer input time belongs to
"""
hours = [0, 40000, 80000, 120000, 160000, 200000]
prev = 0
output = 0.0
for h in hours:
if time <= h and time > prev:
output = float(h/10000)
return output
elif time == 200000:
output = float(200000/10000)
return output
elif time > float(200000):
output = float(24)
return output
# midnight
elif time == 0:
output = float(24)
return output
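# A few spot checks of get_range as written above (times are hhmmss integers):
#   get_range(35959)  -> 4.0   (anything in (0, 40000] lands in the 04:00 bin)
#   get_range(120000) -> 12.0
#   get_range(230000) -> 24.0  (times after 20:00:00 roll into the final bin)
#   get_range(0)      -> 24.0  (midnight is treated as the end of the day)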
def main():
print "Pulling data..."
df = get_data()
df = add_clean_columns(df)
df.to_pickle(OUTFILE)
print "Pulling data complete!"
print "Data saved to {0}".format(OUTFILE)
if __name__ == '__main__':
main()
``` |
{
"source": "jkeuper/transip_dyndns",
"score": 3
} |
#### File: jkeuper/transip_dyndns/dyndns.py
```python
import sys
import argparse
from requests import get
from transip_rest_client import TransipRestClient
def getOptions(args=sys.argv[1:]):
parser = argparse.ArgumentParser(description="DynDNS: Updates a DNS record for a dynamic IP address.")
parser.add_argument("-u", "--user", help="Your username.", required=True)
parser.add_argument("-k", "--key", help="Key file containing RSA private key.", required=True)
parser.add_argument("-n", "--name", help="Name of the record (e.g. 'www').", required=True)
parser.add_argument("-d", "--domain", help="Existing DNS domain (e.g. 'example.com').", required=True)
parser.add_argument("-v", "--verbose", action='store_true', help="Verbose mode.")
options = parser.parse_args(args)
return options
def find(arr, id):
for x in arr:
if x["name"] == id:
return x
def main(key, username, domain, name, verbose):
with open(key, 'r') as f:
my_RSA_key = f.read()
if "BEGIN RSA PRIVATE KEY" not in my_RSA_key:
print("Key in incorrect format, convert the key with the following command:")
print("openssl rsa -in privatekey.txt -out rsaprivatekey.txt")
return
newIp = get('https://api.ipify.org').text
if verbose:
print(f"Retrieved IP from api.ipify.org: {newIp}")
client = TransipRestClient(user=username, rsaprivate_key=my_RSA_key, global_key=True)
entries = client.get_dns_entries(domain=domain)
if verbose:
print(f"Found {len(entries)} DNS entries")
entry = find(entries, name)
if entry is None:
print(f"No ip found, adding {newIp}")
client.post_dns_entry(domain=domain, name=name, expire=300, record_type='A', content=newIp)
else:
oldIp = entry["content"]
if verbose:
print(f"Found current IP in DNS entry: {oldIp}")
if oldIp != newIp:
print(f"Updating {oldIp} to {newIp}")
client.patch_dns_entry(domain=domain, name=name, record_type='A', content=newIp)
else:
print(f"Not updating {oldIp}")
if __name__ == "__main__":
options = getOptions()
if options.verbose:
print("Verbose output enabled.")
main(options.key, options.user, options.domain, options.name, options.verbose)
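# Example invocation (hypothetical values; the key file must hold an RSA private
# key, e.g. one produced by the openssl command mentioned in main() above):
#   python dyndns.py --user myuser --key rsaprivatekey.txt --name www --domain example.com --verbose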
``` |
{
"source": "jkeyes/amzlist",
"score": 4
} |
#### File: amzlist/amzlist/__init__.py
```python
class Node(object):
""" A Node is a simple object with two attributes, `next` and `data`.
`data` stores a value, and `next` holds a reference to another Node.
"""
strict = False
def __init__(self, data):
""" Initialize a new Node with the specified data. """
self.next = None
""" Next text """
self.data = data
""" Data text """
def __setattr__(self, key, value):
""" Override `__setattr__` to ensure that next is a Node. """
if key == "next" and value:
if value is not None:
if not isinstance(value, Node):
raise TypeError
if Node.strict and value.next:
# If we are in strict mode we check to make sure this
# modification to `next` will not create a cycle.
node = value.next
while node:
if node == self:
raise ValueError("Cannot insert %s cycle detected" \
% (value.data))
node = node.next
super(Node, self).__setattr__(key, value)
def __str__(self):
""" Returns the string representation e.g. A->None, or A->B. """
next = self.next.data if self.next else "None"
return "%s->%s" % (self.data, next)
class LinkedList(object):
""" A LinkedList implementation. """
def __init__(self, strict=None):
""" Initialize a new LinkedList. """
if strict is None:
strict = False
Node.strict = strict
super(LinkedList, self).__init__()
self.first_node = None
@property
def next(self):
""" Returns the `next` Node. """
return self.first_node.next
@property
def data(self):
""" Returns the `data` for the first Node. """
return self.first_node.data
@property
def last_node(self):
""" Returns the last Node. """
nodes = self.as_list()
if nodes:
# If there are nodes return the last one.
return nodes[-1]
# No nodes, return None
return None
def prepend(self, node):
""" Inserts `node` at the head of the LinkedList. """
if not isinstance(node, Node):
# If the node parameter is not a Node then update it
# to refer to one.
node = Node(node)
# The new node will store the current first_node in it's next attribute.
node.next = self.first_node
# Update the first_node reference to the new node.
self.first_node = node
def append(self, node):
""" Inserts `node` at the tail of the LinkedList. """
if not isinstance(node, Node):
# If the node parameter is not a Node then update it
# to refer to one.
node = Node(node)
if self.first_node is None:
# The first_node is None therefore we just set it.
self.first_node = node
else:
# Find the last_node in the list and update it's next attribute.
self.last_node.next = node
def insert(self, node, after):
""" Inserts `node` and makes `after.next` refer to it. """
if not isinstance(after, Node):
# If the after parameter is not a Node raise an error.
raise TypeError("After must be a Node not a %s" % (type(after)))
if not isinstance(node, Node):
# If the node parameter is not a Node then update it
# to refer to one.
node = Node(node)
node.next = after.next
after.next = node
def push(self, node):
""" Prepends a Node to the head. """
self.prepend(node)
def pop(self):
""" Returns the Node from the head, and removes it. """
res = self.first_node
self.first_node = self.first_node.next
return res
def remove(self, node):
""" Remove the specified `node`.
If the `node` parameter is a Node, and it has `data` and
a `next` Node then the first Node with encountered that
has the same `data` and `next` attribute values will be
removed.
If the `node` parameter is a value other than a Node or a
Node with just a `data` attribute value, then the first node
encountered with the same `data` attribute is removed.
"""
curr, prev = self.find(node, inc_prev=True)
if curr:
self._remove(curr, prev)
def find(self, node, inc_prev=None):
""" Find the specified Node.
If the `node` parameter is a Node, and it has `data` and
a `next` Node then the first Node with encountered that
has the same `data` and `next` attribute values will match.
If the `node` parameter is a value other than a Node or a
Node with just a `data` attribute value, then the first node
encountered with the same `data` attribute value will match.
If `inc_prev` is `True`, this method returns the node and
it's previous node in a tuple, otherwise it returns the node.
This method returns `None` if the node cannot be found.
"""
if inc_prev is None:
# Default include previous node to False
inc_prev = False
if not isinstance(node, Node):
# If the node parameter is not a Node then update it
# to refer to one.
node = Node(node)
if node.next:
# match based on Node
test = lambda curr, node: curr == node
else:
# match based on data
test = lambda curr, node: curr.data == node.data
prev = None
curr = self.first_node
# Iterate over each node.
while curr:
if test(curr, node):
# include the previous node in the return value
if inc_prev:
return (curr, prev)
# just return the node
else:
return curr
prev = curr
curr = curr.next
# No node could be found.
raise ValueError("Node %s could not be found." % (node.data))
def _remove(self, curr, prev):
""" Remove `curr` and update the next attribute for `prev`. """
if prev:
# If there is a previous node then update it's next attribute
# to refer to the next node of the node that is being removed.
prev.next = curr.next
else:
# If there is no previous node then we are at the head of the list.
# Update the first_node reference to the next node of the node
# that is being removed.
self.first_node = curr.next
# Delete the node that has been delinked.
del curr
def reverse_iterative(self):
""" Returns a new LinkedList with the Nodes in reverse order.
This method uses an iterative approach.
"""
# Create the new LinkedList.
new_list = LinkedList()
# Set the initial node to reverse from.
node = self.first_node
# iterate over each node and stop when node is None
while node:
next = node.next
# Prepend the node to the new list.
new_list.prepend(node)
# Update the node reference.
node = next
return new_list
def reverse_recursive(self, node=None, new_list=None):
""" Returns a new LinkedList with the Nodes in reverse order.
This method uses a recursive approach.
"""
if new_list is None:
# First time through we initalise the new LinkedList
# and set the initial node to being reversing from.
new_list = LinkedList()
if node is None:
node = self.first_node
if node:
# If we have a node then prepend it to the new list.
# As all nodes are being prepended the new list will
# have the nodes in the reverse order.
next = node.next
new_list.prepend(node)
if next is not None:
# If there are more nodes call this method again.
self.reverse_recursive(next, new_list)
# Node reference is None, so we've reached the end of
# the LinkedList.
return new_list
def as_list(self):
""" Returns this LinkedList as a `list` of Nodes. """
nodes = []
node = self.first_node
while node:
nodes.append(node)
node = node.next
return nodes
def __len__(self):
""" Returns the length/size of this LinkedList. """
return len(self.as_list())
def __str__(self):
""" The string representation of the LinkedList. """
return "->".join([str(n.data) for n in self.as_list()])
```
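The docstrings above describe each operation in isolation; the sketch below strings them together. It is only an illustrative walkthrough of the API defined in this module, not part of the package.
```python
# Illustrative walkthrough of the LinkedList/Node API defined above.
from amzlist import LinkedList, Node

items = LinkedList(strict=True)   # strict mode makes Node reject cycles on `next`
items.append("A")                 # A
items.append(Node("B"))           # A->B
items.prepend("Z")                # Z->A->B
print(items, len(items))          # Z->A->B 3

node_a = items.find("A")          # matches on data; raises ValueError if absent
items.insert("A2", after=node_a)  # Z->A->A2->B
items.remove("Z")                 # A->A2->B
print(items.pop())                # A->A2  (the popped head Node and its successor)

# reverse_iterative()/reverse_recursive() relink the existing Node objects into
# a new LinkedList, so the original list should not be reused afterwards.
print(items.reverse_iterative())  # B->A2
```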
#### File: amzlist/tests/test_node.py
```python
from unittest import TestCase
from amzlist import Node
from nose.tools import raises
class NodeTest(TestCase):
""" Test the Node """
@raises(TypeError)
def test_no_data(self):
""" Test instantiation of a Node """
Node()
@raises(TypeError)
def test_next_not_node(self):
""" Test instantiation of a Node """
a = Node("A")
a.next = "B"
def test_to_str(self):
""" Test instantiation of a Node """
b = Node("B")
a = Node("A")
a.next = b
self.assertEqual("A->B", str(a))
``` |
{
"source": "jkeyes/pathfinder",
"score": 4
} |
#### File: pathfinder/pathfinder/filters.py
```python
import fnmatch as fnmatch_module
import os
import re
from math import sqrt
class Filter(object):
def __and__(self, other):
return AndFilter(self, other)
def __or__(self, other):
return OrFilter(self, other)
def find(self, filepath):
from pathfinder import walk_and_filter
return walk_and_filter(filepath, self)
class AlwaysAcceptFilter(Filter):
""" Accept every path. """
def accepts(self, _):
""" Always returns True. """
return True
class DirectoryFilter(Filter):
""" Accept directory paths. """
def accepts(self, filepath):
""" Returns True if filepath represents a directory. """
return os.path.isdir(filepath)
class FileFilter(Filter):
""" Accept file paths. """
def accepts(self, filepath):
""" Returns True if filepath represents a file. """
return os.path.isfile(filepath)
class RegexFilter(Filter):
""" Accept paths if they match the specified regular expression. """
def __init__(self, regex):
""" Initialize the filter with the specified regular expression. """
super(RegexFilter, self).__init__()
self.regex = re.compile(regex)
def accepts(self, filepath):
""" Returns True if the regular expression matches the filepath. """
return self.regex.match(filepath) is not None
class FnmatchFilter(Filter):
""" Accept paths if they match the specifed fnmatch pattern. """
def __init__(self, pattern):
""" Initialize the filter with the specified fnmatch pattern. """
super(FnmatchFilter, self).__init__()
self.pattern = pattern
def accepts(self, filepath):
""" Returns True if the fnmatch pattern matches the filepath. """
return fnmatch_module.fnmatch(filepath, self.pattern)
class AndFilter(Filter, list):
""" Accept paths if all of it's filters accept the path. """
def __init__(self, *args):
""" Initialize the filter with the list of filters. """
list.__init__(self, args)
def accepts(self, filepath):
""" Returns True if all of the filters in this filter return True. """
return all(sub_filter.accepts(filepath) for sub_filter in self)
class OrFilter(Filter, list):
""" Accept paths if any of it's filters accept the path. """
def __init__(self, *args):
""" Initialize the filter with the list of filters. """
list.__init__(self, args)
def accepts(self, filepath):
""" Returns True if any of the filters in this filter return True. """
return any(sub_filter.accepts(filepath) for sub_filter in self)
class NotFilter(Filter):
""" Negate the accept of the specified filter. """
def __init__(self, pathfilter):
""" Initialize the filter with the filter it is to negate. """
super(NotFilter, self).__init__()
self.pathfilter = pathfilter
def accepts(self, filepath):
""" Returns True of the sub-filter returns False. """
return not self.pathfilter.accepts(filepath)
class DotDirectoryFilter(AndFilter):
""" Do not accept a path for a directory that begins with a period. """
def __init__(self):
"""
Initialise the filter to ignore directories beginning with
a period.
"""
super(DotDirectoryFilter, self).__init__(
DirectoryFilter(), RegexFilter(r".*%s*\..*$" % (os.sep))
)
class SizeFilter(FileFilter):
def __init__(self, max_bytes=None, min_bytes=None):
self.file_filter = FileFilter()
self.max_bytes = max_bytes
self.min_bytes = min_bytes
def accepts(self, filepath):
if super(SizeFilter, self).accepts(filepath):
stat = os.stat(filepath)
if self.max_bytes is not None:
if stat.st_size > self.max_bytes:
return False
if self.min_bytes is not None:
if stat.st_size < self.min_bytes:
return False
return True
return False
class ImageFilter(Filter):
""" Accept paths for Image files. """
def __init__(self):
self.file_filter = OrFilter(
FnmatchFilter("*.jpg"),
FnmatchFilter("*.jpeg"),
FnmatchFilter("*.png"),
FnmatchFilter("*.gif"),
FnmatchFilter("*.bmp"),
FnmatchFilter("*.tiff"),
)
def accepts(self, filepath):
return self.file_filter.accepts(filepath)
class ImageDimensionFilter(ImageFilter):
""" Accept paths for Image files. """
def __init__(
self, max_width=None, max_height=None, min_width=None, min_height=None
):
super(ImageDimensionFilter, self).__init__()
if min_height is None:
min_height = 0
if min_width is None:
min_width = 0
self.max_width = max_width
self.max_height = max_height
self.min_width = min_width
self.min_height = min_height
def accepts(self, filepath):
if super(ImageDimensionFilter, self).accepts(filepath):
if (
self.min_height == 0
and self.min_width == 0
and self.max_height is None
and self.max_width is None
):
return True
from PIL import Image
image = Image.open(filepath)
size = image.size
if self.max_width and size[0] > self.max_width:
return False
if self.max_height and size[1] > self.max_height:
return False
if self.min_width and size[0] < self.min_width:
return False
if self.min_height and size[1] < self.min_height:
return False
return True
return False
class GreyscaleImageFilter(ImageFilter):
def accepts(self, filepath):
if super(GreyscaleImageFilter, self).accepts(filepath):
from PIL import Image
from PIL import ImageStat
image = Image.open(filepath)
palette = image.getpalette()
if palette:
# GIF support
return is_greyscale_palette(palette)
stat = ImageStat.Stat(image)
# B&W JPEG: 8-bit pixels, black and white
if image.mode == "L":
return True
# if the standard deviation of the mean is less than 1 we say it's a greyscale image
# where mean = average (arithmetic mean) pixel level for each band in the image.
# note we ignore alpha bands here
return stdv(stat.mean[:3]) < 1
return False
class ColorImageFilter(ImageFilter):
def accepts(self, filepath):
if super(ColorImageFilter, self).accepts(filepath):
from PIL import Image
from PIL import ImageStat
image = Image.open(filepath)
palette = image.getpalette()
if palette:
# GIF SUPPORT
return is_color_palette(palette)
stat = ImageStat.Stat(image)
# B&W JPEG: 8-bit pixels, black and white
if image.mode == "L":
return False
# if the standard deviation of the mean is more than 1 we say it's a color image
# where mean = average (arithmetic mean) pixel level for each band in the image.
# note we ignore alpha bands here
return stdv(stat.mean[:3]) > 1
return False
def stdv(band_means):
"""Calculate the standard deviation of the image bands."""
num_bands, _sum, mean, std = len(band_means), sum(band_means), 0, 0
mean = _sum / float(num_bands)
sum_diff = sum((a - mean) ** 2 for a in band_means)
std = sqrt(sum_diff / float(num_bands - 1))
return std
def is_greyscale_palette(palette):
"""Return whether the palette is greyscale only."""
for i in range(256):
j = i * 3
        if not (palette[j] == palette[j + 1] == palette[j + 2]):
return False
return True
def is_color_palette(palette):
"""Return whether the palette has color."""
return not is_greyscale_palette(palette)
```
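Because `Filter` overloads `&` and `|` and exposes `find()`, the classes above compose directly. A short sketch of that composition follows; the `/some/photos` path is a placeholder.
```python
# Composing the filters defined above; "/some/photos" is a placeholder path.
from pathfinder.filters import (
    DirectoryFilter,
    FnmatchFilter,
    ImageFilter,
    NotFilter,
    SizeFilter,
)

# Plain-text files only: fnmatch AND "not a directory".
text_files = (FnmatchFilter("*.txt") & NotFilter(DirectoryFilter())).find("/some/photos")

# Images no larger than 1 MiB: ImageFilter AND SizeFilter.
small_images = (ImageFilter() & SizeFilter(max_bytes=1024 * 1024)).find("/some/photos")

# Either .log or .dat files, via OR composition.
logs_or_data = (FnmatchFilter("*.log") | FnmatchFilter("*.dat")).find("/some/photos")
```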
#### File: pathfinder/pathfinder/__init__.py
```python
import os
from pathfinder import filters
def walk_and_filter(filepath, pathfilter, ignore=None, abspath=None, depth=None):
"""Walk the file tree and filter it's contents."""
if not os.path.exists(filepath):
raise EnvironmentError(filepath)
return list(walk_and_filter_generator(filepath, pathfilter, ignore, abspath, depth))
def walk_and_filter_generator( # noqa:C901
filepath, pathfilter, ignore=None, abspath=None, depth=None
):
"""
Walk the file tree and filter it's contents.
To ignore any paths an specify an ignore filter.
To return absolute paths pass True for the abspath parameter.
To limit how deep into the tree you travel, specify the depth parameter.
"""
# by default no depth limit is enforced
if depth is None:
depth = -1
else:
depth = int(depth)
if abspath is None:
abspath = False
if os.path.isdir(filepath):
base_path = os.path.normpath(filepath)
else:
base_path = os.path.normpath(os.path.dirname(filepath))
for root, dirs, files in os.walk(base_path):
# descend the tree to a certain depth
level = len(root.split(base_path)[1].split(os.sep))
if level > depth and depth != -1:
break
# process in order
dirs.reverse()
ignored = []
for adir in dirs:
dirpath = os.path.normpath(os.path.join(root, adir))
if ignore and ignore.accepts(dirpath):
ignored.append(adir)
continue
if pathfilter.accepts(dirpath):
if abspath:
hit_path = os.path.abspath(dirpath)
else:
hit_path = os.path.join(base_path, dirpath)
yield hit_path
# remove the dirs we are ignoring
for adir in ignored:
dirs.remove(adir)
for afile in files:
filepath = os.path.normpath(os.path.join(root, afile))
if ignore and ignore.accepts(filepath):
continue
if pathfilter.accepts(filepath):
if abspath:
filepath = os.path.abspath(filepath)
yield filepath
def find_paths(
directory_path,
just_dirs=None,
just_files=None,
regex=None,
fnmatch=None,
filter=None, # skipcq: PYL-W0622
ignore=None,
abspath=None,
depth=None,
):
"""Find paths in the tree rooted at filepath."""
if just_dirs:
path_filter = filters.DirectoryFilter()
elif just_files:
path_filter = filters.FileFilter()
elif regex:
path_filter = filters.RegexFilter(regex)
elif fnmatch:
path_filter = filters.FnmatchFilter(fnmatch)
elif not filter:
path_filter = filters.AlwaysAcceptFilter()
else:
path_filter = filter
return walk_and_filter(directory_path, path_filter, ignore, abspath, depth)
```
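A short sketch of how `find_paths` can be combined with an ignore filter (an illustration, not part of the package); `./project` is a hypothetical directory and the fnmatch patterns are matched against full paths, as in the tests below:
```python
from pathfinder import find_paths
from pathfinder.filters import FnmatchFilter

# All *.py files under ./project (hypothetical path), skipping *_test.py names,
# descending at most three directory levels
paths = find_paths("./project", fnmatch="*.py",
                   ignore=FnmatchFilter("*_test.py"), depth=3)
for path in paths:
    print(path)
```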
#### File: pathfinder/tests/test.py
```python
import os
import unittest
from pathfinder import find_paths
from pathfinder import walk_and_filter
from pathfinder.filters import (
SizeFilter,
DirectoryFilter,
FileFilter,
RegexFilter,
AndFilter,
OrFilter,
NotFilter,
FnmatchFilter,
DotDirectoryFilter,
ImageDimensionFilter,
ImageFilter,
ColorImageFilter,
GreyscaleImageFilter,
)
BASEPATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
class FindTest(unittest.TestCase):
def test_just_dirs(self):
""" Test just_dirs parameter."""
# only find directories
paths = find_paths(BASEPATH, just_dirs=True)
self.assertEqual(5, len(paths))
self.assertTrue(os.path.join(BASEPATH, "dir1") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir1", "subdirectory") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir3") in paths)
self.assertTrue(os.path.join(BASEPATH, ".dir4") in paths)
# use Filter.find
paths_2 = DirectoryFilter().find(BASEPATH)
self.assertEqual(paths, paths_2)
def test_just_files(self):
""" Test just_files parameter."""
# only find files
paths = find_paths(BASEPATH, just_files=True)
self.assertEqual(17, len(paths))
self.assertTrue(os.path.join(BASEPATH, "file1.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "file2.dat") in paths)
self.assertTrue(os.path.join(BASEPATH, "file3.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo.gif") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo.png") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo_gs.gif") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo_gs.jpg") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo_gs.png") in paths)
self.assertTrue(os.path.join(BASEPATH, "transparent_gs.png") in paths)
self.assertTrue(
os.path.join(BASEPATH, "dir1", "subdirectory", "sub.txt") in paths
)
self.assertTrue(os.path.join(BASEPATH, "dir1", "file4.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir1", "file5.log") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2", "file6.log") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2", "file7.html") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir3", "file8") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir3", ".file9") in paths)
self.assertTrue(os.path.join(BASEPATH, ".dir4", "file10") in paths)
# use Filter.find
paths_2 = FileFilter().find(BASEPATH)
self.assertEqual(paths, paths_2)
def test_regex(self):
""" Test regex parameter."""
# find all files and directories
paths = find_paths(BASEPATH, regex=".*")
self.assertEqual(22, len(paths))
self.assertTrue(os.path.join(BASEPATH, "dir1") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir1", "subdirectory") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir3") in paths)
self.assertTrue(os.path.join(BASEPATH, ".dir4") in paths)
self.assertTrue(os.path.join(BASEPATH, "file1.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "file2.dat") in paths)
self.assertTrue(os.path.join(BASEPATH, "file3.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo.gif") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo.png") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo_gs.gif") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo_gs.jpg") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo_gs.png") in paths)
self.assertTrue(os.path.join(BASEPATH, "transparent_gs.png") in paths)
self.assertTrue(
os.path.join(BASEPATH, "dir1", "subdirectory", "sub.txt") in paths
)
self.assertTrue(os.path.join(BASEPATH, "dir1", "file4.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir1", "file5.log") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2", "file6.log") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2", "file7.html") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir3", "file8") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir3", ".file9") in paths)
self.assertTrue(os.path.join(BASEPATH, ".dir4", "file10") in paths)
# use Filter.find
paths_2 = RegexFilter(".*").find(BASEPATH)
self.assertEqual(paths, paths_2)
# find only files and directories with a t in the extension
paths = find_paths(BASEPATH, regex=r".*\..*t.*$")
self.assertEqual(6, len(paths))
self.assertTrue(os.path.join(BASEPATH, "file1.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "file2.dat") in paths)
self.assertTrue(os.path.join(BASEPATH, "file3.txt") in paths)
self.assertTrue(
os.path.join(BASEPATH, "dir1", "subdirectory", "sub.txt") in paths
)
self.assertTrue(os.path.join(BASEPATH, "dir1", "file4.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2", "file7.html") in paths)
# find only files and directories with 1 anywhere in the path
paths = find_paths(BASEPATH, regex=".*1.*")
self.assertTrue(7, len(paths))
self.assertTrue(os.path.join(BASEPATH, "dir1") in paths)
self.assertTrue(os.path.join(BASEPATH, "file1.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir1", "subdirectory") in paths)
self.assertTrue(
os.path.join(BASEPATH, "dir1", "subdirectory", "sub.txt") in paths
)
self.assertTrue(os.path.join(BASEPATH, "dir1", "file4.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir1", "file5.log") in paths)
self.assertTrue(os.path.join(BASEPATH, ".dir4", "file10") in paths)
def test_fnmatch(self):
""" Test fnmatch parameter."""
# find all files and directories
paths = find_paths(BASEPATH, fnmatch="*")
self.assertEqual(22, len(paths))
self.assertTrue(os.path.join(BASEPATH, "dir1") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir1", "subdirectory") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir3") in paths)
self.assertTrue(os.path.join(BASEPATH, ".dir4") in paths)
self.assertTrue(os.path.join(BASEPATH, "file1.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "file2.dat") in paths)
self.assertTrue(os.path.join(BASEPATH, "file3.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo.gif") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo.png") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo_gs.gif") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo_gs.jpg") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo_gs.png") in paths)
self.assertTrue(os.path.join(BASEPATH, "transparent_gs.png") in paths)
self.assertTrue(
os.path.join(BASEPATH, "dir1", "subdirectory", "sub.txt") in paths
)
self.assertTrue(os.path.join(BASEPATH, "dir1", "file4.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir1", "file5.log") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2", "file6.log") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2", "file7.html") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir3", "file8") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir3", ".file9") in paths)
self.assertTrue(os.path.join(BASEPATH, ".dir4", "file10") in paths)
# find only files or directories with a .txt extension
paths = find_paths(BASEPATH, fnmatch="*.txt")
self.assertEqual(4, len(paths))
self.assertTrue(os.path.join(BASEPATH, "file1.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "file3.txt") in paths)
self.assertTrue(
os.path.join(BASEPATH, "dir1", "subdirectory", "sub.txt") in paths
)
self.assertTrue(os.path.join(BASEPATH, "dir1", "file4.txt") in paths)
def test_all(self):
""" Test with no parameters. """
# find all paths
paths = find_paths(BASEPATH)
self.assertEqual(22, len(paths))
self.assertTrue(os.path.join(BASEPATH, "dir1") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir1", "subdirectory") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir3") in paths)
self.assertTrue(os.path.join(BASEPATH, ".dir4") in paths)
self.assertTrue(os.path.join(BASEPATH, "file1.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "file2.dat") in paths)
self.assertTrue(os.path.join(BASEPATH, "file3.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo.gif") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo.png") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo_gs.gif") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo_gs.jpg") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo_gs.png") in paths)
self.assertTrue(os.path.join(BASEPATH, "transparent_gs.png") in paths)
self.assertTrue(
os.path.join(BASEPATH, "dir1", "subdirectory", "sub.txt") in paths
)
self.assertTrue(os.path.join(BASEPATH, "dir1", "file4.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir1", "file5.log") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2", "file6.log") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2", "file7.html") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir3", "file8") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir3", ".file9") in paths)
def test_and(self):
""" Test AndFilter."""
# find directories with a 2 anywhere in the path
filt = AndFilter(DirectoryFilter(), RegexFilter(".*2.*"))
paths = find_paths(BASEPATH, filter=filt)
self.assertEqual(1, len(paths))
self.assertTrue(os.path.join(BASEPATH, "dir2") in paths)
# test overridden __and__
filt = DirectoryFilter() & RegexFilter(".*2.*")
paths_2 = find_paths(BASEPATH, filter=filt)
self.assertEqual(paths, paths_2)
# use Filter.find
paths_3 = AndFilter(DirectoryFilter(), RegexFilter(".*2.*")).find(BASEPATH)
self.assertEqual(paths, paths_3)
def test_or(self):
""" Test OrFilter."""
# find all directories and any files (or directories)
# with 2 in the path
filt = OrFilter(DirectoryFilter(), RegexFilter(".*2.*"))
paths = find_paths(BASEPATH, filter=filt)
self.assertEqual(8, len(paths))
self.assertTrue(os.path.join(BASEPATH, "dir1") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir1", "subdirectory") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir3") in paths)
self.assertTrue(os.path.join(BASEPATH, ".dir4") in paths)
self.assertTrue(os.path.join(BASEPATH, "file2.dat") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2", "file6.log") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2", "file7.html") in paths)
# test overridden __or__
filt = DirectoryFilter() | RegexFilter(".*2.*")
paths_2 = find_paths(BASEPATH, filter=filt)
self.assertEqual(paths, paths_2)
# use Filter.find
paths_3 = OrFilter(DirectoryFilter(), RegexFilter(".*2.*")).find(BASEPATH)
self.assertEqual(paths, paths_3)
def test_not(self):
""" Test NotFilter."""
# find all files and directories with a .txt extension
# except ones that end in 3.txt
filt = AndFilter(NotFilter(FnmatchFilter("*3.txt")), FnmatchFilter("*.txt"))
paths = find_paths(BASEPATH, filter=filt)
self.assertEqual(3, len(paths))
self.assertTrue(os.path.join(BASEPATH, "file1.txt") in paths)
self.assertTrue(
os.path.join(BASEPATH, "dir1", "subdirectory", "sub.txt") in paths
)
self.assertTrue(os.path.join(BASEPATH, "dir1", "file4.txt") in paths)
def test_ignore(self):
""" Test ignore parameter."""
# find all directories and all files and directories
# with a 2 in the path and no directories that begin
# with a dot
filt = OrFilter(DirectoryFilter(), RegexFilter(".*2.*"))
ignore = DotDirectoryFilter()
paths = find_paths(BASEPATH, filter=filt, ignore=ignore)
self.assertEqual(7, len(paths))
self.assertTrue(os.path.join(BASEPATH, "dir1") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir1", "subdirectory") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir3") in paths)
self.assertTrue(os.path.join(BASEPATH, "file2.dat") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2", "file6.log") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2", "file7.html") in paths)
filt = FnmatchFilter("*.txt")
ignore = FnmatchFilter("*4.txt")
all_paths = find_paths(BASEPATH, filter=filt)
self.assertEqual(4, len(all_paths))
self.assertTrue("4.txt" in " ".join(all_paths))
ignore_paths = find_paths(BASEPATH, filter=filt, ignore=ignore)
self.assertEqual(3, len(ignore_paths))
self.assertFalse("4.txt" in " ".join(ignore_paths))
def test_abspath(self):
""" Make sure all paths are absolute paths."""
cwd = os.getcwd()
paths = find_paths(BASEPATH, filter=DirectoryFilter(), abspath=True)
self.assertEqual(5, len(paths))
self.assertTrue(os.path.join(cwd, BASEPATH, "dir1") in paths)
self.assertTrue(os.path.join(cwd, BASEPATH, "dir1", "subdirectory") in paths)
self.assertTrue(os.path.join(cwd, BASEPATH, "dir2") in paths)
self.assertTrue(os.path.join(cwd, BASEPATH, "dir3") in paths)
self.assertTrue(os.path.join(cwd, BASEPATH, ".dir4") in paths)
paths = find_paths(BASEPATH, just_files=True, abspath=True)
self.assertEqual(17, len(paths))
self.assertTrue(os.path.join(cwd, BASEPATH, "python_logo.png") in paths)
def test_depth(self):
""" Only descend a certain depth into a tree."""
paths = find_paths(BASEPATH, filter=DirectoryFilter(), depth=1)
self.assertEqual(4, len(paths))
self.assertTrue(os.path.join(BASEPATH, "dir1") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir3") in paths)
self.assertTrue(os.path.join(BASEPATH, ".dir4") in paths)
paths = find_paths(BASEPATH, filter=DirectoryFilter(), depth=2)
self.assertEqual(5, len(paths))
self.assertTrue(os.path.join(BASEPATH, "dir1", "subdirectory") in paths)
def test_size(self):
""" Find files based on size criteria. """
# all files except the image files are less than 10 bytes
p_filter = SizeFilter(max_bytes=0)
paths = walk_and_filter(BASEPATH, p_filter)
self.assertEqual(11, len(paths))
# only the image files contain data
p_filter = SizeFilter(min_bytes=1)
paths = walk_and_filter(BASEPATH, p_filter)
self.assertEqual(6, len(paths))
# three images between 450 bytes and 9000
p_filter = SizeFilter(min_bytes=450, max_bytes=9000)
paths = walk_and_filter(BASEPATH, p_filter)
self.assertEqual(3, len(paths))
def test_image(self):
""" Find all images. """
image_filter = ImageFilter()
paths = walk_and_filter(BASEPATH, image_filter)
self.assertEqual(6, len(paths))
def test_find_filepath(self):
""" Test when the root path to a find is a file and not a directory. """
a_paths = find_paths(os.path.join(BASEPATH, "python_logo.png"), just_files=True)
b_paths = find_paths(BASEPATH, just_files=True)
self.assertEqual(a_paths, b_paths)
try:
import PIL
def test_image_dimension(self):
""" Find images based on dimensions. """
p_filter = ImageDimensionFilter(
max_width=1000, max_height=1000, min_height=20, min_width=20
)
paths = walk_and_filter(BASEPATH, p_filter)
self.assertEqual(6, len(paths))
# ignore the 24x24
p_filter = ImageDimensionFilter(
max_width=1000, max_height=1000, min_height=25, min_width=25
)
paths = walk_and_filter(BASEPATH, p_filter)
self.assertEqual(5, len(paths))
# no 24x24, but only check it based on height
p_filter = ImageDimensionFilter(min_height=25)
paths = walk_and_filter(BASEPATH, p_filter)
self.assertEqual(5, len(paths))
# only the 24x24
p_filter = ImageDimensionFilter(max_width=24, max_height=24)
paths = walk_and_filter(BASEPATH, p_filter)
self.assertEqual(1, len(paths))
# only the 24x24, but only check it based on height
p_filter = ImageDimensionFilter(max_height=24)
paths = walk_and_filter(BASEPATH, p_filter)
self.assertEqual(1, len(paths))
# no parameters - all images
p_filter = ImageDimensionFilter()
paths = walk_and_filter(BASEPATH, p_filter)
self.assertEqual(6, len(paths))
def test_bw_image(self):
""" Find all grey scale images. """
p_filter = GreyscaleImageFilter()
paths = walk_and_filter(BASEPATH, p_filter)
self.assertEqual(4, len(paths))
def test_color_image(self):
""" Find all color images. """
p_filter = ColorImageFilter()
paths = walk_and_filter(BASEPATH, p_filter)
self.assertEqual(2, len(paths))
except ImportError:
pass
def test_generator(self):
""" Test with no parameters. """
# find all paths
paths = []
for path in find_paths(BASEPATH):
paths.append(path)
self.assertEqual(22, len(paths))
self.assertTrue(os.path.join(BASEPATH, "dir1") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir1", "subdirectory") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir3") in paths)
self.assertTrue(os.path.join(BASEPATH, ".dir4") in paths)
self.assertTrue(os.path.join(BASEPATH, "file1.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "file2.dat") in paths)
self.assertTrue(os.path.join(BASEPATH, "file3.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo.gif") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo.png") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo_gs.gif") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo_gs.jpg") in paths)
self.assertTrue(os.path.join(BASEPATH, "python_logo_gs.png") in paths)
self.assertTrue(os.path.join(BASEPATH, "transparent_gs.png") in paths)
self.assertTrue(
os.path.join(BASEPATH, "dir1", "subdirectory", "sub.txt") in paths
)
self.assertTrue(os.path.join(BASEPATH, "dir1", "file4.txt") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir1", "file5.log") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2", "file6.log") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir2", "file7.html") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir3", "file8") in paths)
self.assertTrue(os.path.join(BASEPATH, "dir3", ".file9") in paths)
def test_path_does_not_exist(self):
"""Test when the parameter is a non-existent path."""
# only find directories
with self.assertRaises(EnvironmentError):
find_paths(
os.path.join(os.path.dirname(BASEPATH), "doesnotexist"), just_dirs=True
)
``` |
{
"source": "jkeyes/python-docraptor",
"score": 4
} |
#### File: python-docraptor/example/basic_url.py
```python
from docraptor import DocRaptor
def main():
"""Generate a PDF with specified url."""
docraptor = DocRaptor()
print("Create test_basic_url.pdf")
with open("test_basic_url.pdf", "wb") as pdf_file:
pdf_file.write(
docraptor.create(
{"document_url": "http://docraptor.com", "test": True}
).content
)
if __name__ == "__main__":
main()
``` |
{
"source": "jkeys-nmsu/bioinformatics-scripts",
"score": 3
} |
#### File: jkeys-nmsu/bioinformatics-scripts/compute-fold-values.py
```python
import sys
#constants
DEBUG = True
INPUT_FILE_NAME_TRAIN = "ALL_AML_gr.thr.train.csv"
OUTPUT_FILE_NAME_FOLD_VALUES = "ALL_AML_gr_no_one_folds.thr.train.csv"
OUTPUT_FILE_NAME_GENE_DISTRIBUTION = "ALL_AML_gr.distribution.train.txt"
FOLD_DIFF_LT_2 = 'LT2' #Val <= 2
FOLD_DIFF_2_4 = '2_4' #2 < Val <= 4
FOLD_DIFF_4_8 = '4-8' #4 < Val <= 8
FOLD_DIFF_8_16 = '8-16' #8 < Val <= 16
FOLD_DIFF_16_32 = '16-32' #...
FOLD_DIFF_32_64 = '32-64'
FOLD_DIFF_64_128 = '64-128'
FOLD_DIFF_128_256 = '128-256'
FOLD_DIFF_256_512 = '256-512'
FOLD_DIFF_GT_512 = 'GT512'
#globals
countFoldDiffPerRange = {
    FOLD_DIFF_LT_2: 0, FOLD_DIFF_2_4: 0, FOLD_DIFF_4_8: 0, FOLD_DIFF_8_16: 0,
    FOLD_DIFF_16_32: 0, FOLD_DIFF_32_64: 0, FOLD_DIFF_64_128: 0,
    FOLD_DIFF_128_256: 0, FOLD_DIFF_256_512: 0, FOLD_DIFF_GT_512: 0,
}
def debug(logMsg):
if DEBUG:
print(logMsg)
def computeFoldValues(input_file_name, output_file_name, log_file_name):
with open(input_file_name) as f:
resultLines = [] #will need to remove all genes with fold values of 1, so store a list of all genes which do not have that value
gnuplotLines = []
genesWithOneFoldRatio = {}
geneFoldValues = {}
#create a list of lines, stripped of the newline
content = [line.rstrip('\n') for line in f]
#open the file which will have the lines without one ratios; we don't want to append
out_file = open(output_file_name, "w")
        #open the file which will hold the gene distribution data for gnuplot; we don't want to append
out_file_gnuplot = open(log_file_name, "w")
#just add the first line back to the result lines
idLine = content.pop(0)
resultLines.append(idLine + "\n") #writelines requires newlines
#for every line, compute the fold difference
for line in content:
if len(line) <= 2:
continue
#we need every integer
int_strings = line.split(',')
#the first value is the name of the gene
geneName = int_strings.pop(0)
#set the max and min to the opposite end of the range
maxVal = 20
minVal = 16000
#compute the maxima and minima of each gene
for val in int_strings:
val_int = int(val)
if(val_int >= maxVal):
maxVal = val_int
if(val_int <= minVal):
minVal = val_int
#compute the fold difference
currFoldDiff = maxVal / minVal
#if maxVal eq minVal, then it has a ratio of one
if maxVal == minVal:
genesWithOneFoldRatio[geneName] = currFoldDiff
else:
resultLines.append(line + "\n")
#add the computed fold difference to its range
if currFoldDiff <= 2:
countFoldDiffPerRange[FOLD_DIFF_LT_2] += 1
elif currFoldDiff > 2 and currFoldDiff <= 4:
countFoldDiffPerRange[FOLD_DIFF_2_4] += 1
elif currFoldDiff > 4 and currFoldDiff <= 8:
countFoldDiffPerRange[FOLD_DIFF_4_8] += 1
elif currFoldDiff > 8 and currFoldDiff <= 16:
countFoldDiffPerRange[FOLD_DIFF_8_16] += 1
elif currFoldDiff > 16 and currFoldDiff <= 32:
countFoldDiffPerRange[FOLD_DIFF_16_32] += 1
elif currFoldDiff > 32 and currFoldDiff <= 64:
countFoldDiffPerRange[FOLD_DIFF_32_64] += 1
elif currFoldDiff > 64 and currFoldDiff <= 128:
countFoldDiffPerRange[FOLD_DIFF_64_128] += 1
elif currFoldDiff > 128 and currFoldDiff <= 256:
countFoldDiffPerRange[FOLD_DIFF_128_256] += 1
elif currFoldDiff > 256 and currFoldDiff <= 512:
countFoldDiffPerRange[FOLD_DIFF_256_512] += 1
else:
countFoldDiffPerRange[FOLD_DIFF_GT_512] += 1
#store the fold difference in the dictionary
geneFoldValues[geneName] = currFoldDiff
#end for line in content
#find the largest and smallest fold diffs; also compute the range
largestFoldDiff = -1
smallestFoldDiff = 16000000
for key, value in geneFoldValues.items():
geneName = key
if(value >= largestFoldDiff):
largestFoldDiff = value
if(value <= smallestFoldDiff):
smallestFoldDiff = value
#now find the number of genes which have these values and record them
numGenesWithLargestFoldDiff = 0
numGenesWithSmallestFoldDiff = 0
for key, value in geneFoldValues.items():
if(value == largestFoldDiff):
numGenesWithLargestFoldDiff += 1
if(value == smallestFoldDiff):
numGenesWithSmallestFoldDiff += 1
for key, value in countFoldDiffPerRange.items():
            gnuplotLines.append(key + "\t" + str(value) + "\n")
#end with open
debug("largestFoldDiff: " + str(largestFoldDiff))
debug("smallestFoldDiff: " + str(smallestFoldDiff))
debug("numGenesWithLargestFoldDiff" + str(numGenesWithLargestFoldDiff))
debug("numGenesWithSmallestFoldDiff" + str(numGenesWithSmallestFoldDiff))
# debug("geneFoldValues (dictionary):\n" + str(geneFoldValues))
# debug("genesWithOneFoldRatio (dictionary):\n" + str(genesWithOneFoldRatio))
# debug("countFoldDiffPerRange (dictionary):\n" + str(countFoldDiffPerRange))
out_file.writelines(resultLines)
out_file_gnuplot.writelines(gnuplotLines)
return geneFoldValues
#end computeFoldVals
geneFoldValuesMain = computeFoldValues(INPUT_FILE_NAME_TRAIN, OUTPUT_FILE_NAME_FOLD_VALUES, OUTPUT_FILE_NAME_GENE_DISTRIBUTION)
``` |
{
"source": "jkfids/cross-correlation",
"score": 2
} |
#### File: cross-correlation/code/animation.py
```python
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation
from time import time
# Local modules
from crosscorrelation import norm_crosscorr, spectral_crosscorr
# Set parameters and plot axes
length = 100
plt.rcParams.update({'font.size': 7})
fig1, (ax1, ax2, ax3) = plt.subplots(3, 1, dpi=144)
fig1.tight_layout(h_pad=0, rect=[-0.025, -0.05, 1.015, 0.98])
line1, = ax1.plot([], [], lw=2, color='tab:blue')
line2, = ax2.plot([], [], lw=2, color='tab:red')
line3, = ax3.plot([], [], lw=2, color='purple')
ax1.set_xlim([0, 1])
ax1.set_ylim([-1.5, 1.5])
ax2.set_xlim([0, 1])
ax2.set_ylim([-1.5, 1.5])
ax3.set_xlim([-.5, .5])
ax3.set_ylim([-1.2, 1.2])
ax1.set_xticks([])
ax2.set_xticks([])
ax3.set_xticks([])
ax1.set_yticks([])
ax2.set_yticks([])
ax3.set_yticks([-1, 0, 1])
ax1.title.set_text('f')
ax2.title.set_text('g')
ax3.title.set_text('Cross-Correlation (f\u22C6g)')
x = np.linspace(0, 1, length)
y = np.sin(5*2*np.pi*x)
x = np.linspace(0, 1, round(1.5*length))
y = np.pad(y, (round(length/4), round(length/4)))
lenx = len(x)
leny = len(y)
y_slide = np.pad(y, (lenx-1, lenx-1))
leny_slide = len(y_slide)
x_R = np.linspace(-.5, .5, 2*lenx-1)
R = norm_crosscorr(y, y)
# Generate animations and save to output folder
def init():
line1.set_data([], [])
line2.set_data([], [])
line3.set_data([], [])
return line1, line2, line3,
def animate(i):
y_subset = y_slide[leny_slide-i-1-lenx:leny_slide-i-1]
line1.set_data(x, y)
line2.set_data(x, y_subset)
line3.set_data(x_R[1:i+1], R[1:i+1])
return line1, line2, line3,
start = time()
anim = FuncAnimation(fig1, animate, init_func=init,
frames=len(R)-1, interval=50, blit=True)
anim.save('output/crosscorrelation.gif', writer='ffmpeg')
end = time()
print(f'Time elapsed (animation): {round(end - start, 2)}s')
# Plot and save static versions
line1.set_data([], [])
line2.set_data([], [])
line3.set_data([], [])
ax1.plot(x, y, lw=2, color='tab:blue')
ax2.plot(x, y, lw=2, color='tab:red')
ax3.plot(x_R, R, lw=2, color='purple')
fig1.savefig('output/sinewave_correlation.png')
# Spectral cross-correlation
line1.set_data([], [])
line2.set_data([], [])
line3.set_data([], [])
ax1.plot(x, y, lw=2, color='tab:blue')
ax2.plot(x, y, lw=2, color='tab:red')
x_R = np.linspace(-.5, .5, lenx)
spectral_R = spectral_crosscorr(y, y)
ax3.clear()
ax3.plot(x_R, spectral_R, lw=2, color='purple')
ax3.set_xlim([-.5, .5])
ax3.set_ylim([-1.2, 1.2])
ax3.set_xticks([])
ax3.set_yticks([-1, 0, 1])
ax3.title.set_text('Spectral Cross-Correlation (f\u22C6g)')
fig1.savefig('output/sinewave_spectral_correlation.png')
```
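The `crosscorrelation` module imported above is not shown here; as a rough sketch of what `norm_crosscorr` could look like (an assumption, not the repository's actual implementation), a normalised sliding cross-correlation can be written with NumPy so that the zero-lag autocorrelation equals 1:
```python
import numpy as np

def norm_crosscorr_sketch(f, g):
    """Full sliding cross-correlation of f and g, scaled so corr(f, f) peaks at 1."""
    f = (f - np.mean(f)) / (np.std(f) * len(f))
    g = (g - np.mean(g)) / np.std(g)
    return np.correlate(f, g, mode="full")
```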
#### File: cross-correlation/code/stockcorrelation.py
```python
from time import time
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from numba import njit
# Local modules
from crosscorrelation import norm_corr
# Colour dictionary for category colours
colordict = {
'Health Care': 'tab:red',
'Industrials': 'tab:gray',
'Consumer Discretionary': 'tab:green',
'Information Technology': 'tab:blue',
'Consumer Staples': 'tab:orange',
'Utilities': 'tab:pink',
'Financials': 'darkgray',
'Materials': 'tab:cyan',
'Real Estate': 'tab:olive',
'Energy': 'tab:brown',
'Communication Services': 'tab:purple'
}
@njit
def norm_corr2(f, g):
"""
Normalised correlation function that adjusts length of input vectors if
they are not equal length
"""
try:
return norm_corr(f, g)
except:
size = min(f.size, g.size)
return norm_corr(f[-size:], g[-size:])
class StockCorr:
"""Stock correlation network class"""
def __init__(self, companies):
self.companies = pd.DataFrame(
companies, columns=['Symbol', 'Name', 'Category'])
self.companies = self.companies.sort_values(
by='Symbol', ignore_index=True)
self.categories = self.companies['Category'].unique().tolist()
self.symbols = self.companies['Symbol'].tolist()
self.pricehistdict = {}
self.max_pricehist_len = 0
self.categ_pricehistdict = {}
self.corrmatrix = np.array([])
self.categ_corrmatrix = np.array([])
self.corrmatrixdf = pd.DataFrame()
self.categ_corrmatrixdf = pd.DataFrame()
self.stocknetwork = nx.Graph()
def gen_pricehistdict(self, pricetype='Close'):
"""
Generate a dictionary of price histories from csv data where symbols
are keys and values are price histories
"""
for symb in self.symbols:
df = pd.read_csv(f"data/stocks/{symb}.csv").dropna()
pricehist = df[pricetype].to_numpy()
self.pricehistdict[symb] = pricehist
# Maximum price history length
if pricehist.size > self.max_pricehist_len:
self.max_pricehist_len = pricehist.size
return self.pricehistdict
def calc_corrmatrix(self):
"""
Calculate the correlation matrix between all stocks
"""
N = len(self.symbols)
self.corrmatrix = np.zeros((N, N), dtype=np.float64)
np.fill_diagonal(self.corrmatrix, 1)
# Iterate over each unique pair of stocks
for i, symb1 in enumerate(self.symbols):
for j, symb2 in enumerate(self.symbols):
if i < j:
hist1 = self.pricehistdict[symb1]
hist2 = self.pricehistdict[symb2]
self.corrmatrix[i, j] = norm_corr2(hist1, hist2)
self.corrmatrix += self.corrmatrix.T - np.identity(N)
index = pd.MultiIndex.from_frame(
self.companies[['Category', 'Symbol']])
self.corrmatrixdf = pd.DataFrame(
self.corrmatrix, index=index, columns=index)
return self.corrmatrix, self.corrmatrixdf
def calc_categ_corrmatrix(self):
"""
Calculate the correlation matrix between category averages
"""
N = len(self.categories)
self.categ_corrmatrix = np.zeros((N, N), dtype=np.float64)
np.fill_diagonal(self.categ_corrmatrix, 1)
# Calculate the compound average returns for each category
for categ in self.categories:
companies = self.companies[self.companies['Category'] == categ]
total = np.zeros(self.max_pricehist_len)
for symb in companies['Symbol'].to_list():
hist = self.pricehistdict[symb]
ones = np.ones(self.max_pricehist_len)
ones[-hist.size:] = hist/hist[0]
total += ones
self.categ_pricehistdict[categ] = total/len(companies)
# Iterate over each unique pair of categories to calculate corr matrix
for i, categ1 in enumerate(self.categories):
for j, categ2 in enumerate(self.categories):
if i < j:
hist1 = self.categ_pricehistdict[categ1]
hist2 = self.categ_pricehistdict[categ2]
self.categ_corrmatrix[i, j] = norm_corr2(hist1, hist2)
self.categ_corrmatrix += self.categ_corrmatrix.T - np.identity(N)
        self.categ_corrmatrixdf = pd.DataFrame(self.categ_corrmatrix,
                                               index=self.categories,
                                               columns=self.categories)
return self.categ_corrmatrix, self.categ_corrmatrixdf
def gen_stocknetwork(self):
"""
Generate the stock correlation network as a minimum spanning tree
"""
G = nx.Graph()
# Iterate over every unique combination of stocks
for i, symb1 in enumerate(self.symbols):
for j, symb2 in enumerate(self.symbols):
if i < j:
weight = np.sqrt(2*(1 - self.corrmatrix[i][j]))
G.add_edge(symb1, symb2, weight=weight)
T = nx.minimum_spanning_tree(G)
self.stocknetwork = T
return self.stocknetwork
def draw_corrmatrix(self, mode='stock'):
"""Draw either the stock or category correlation matrix"""
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
if mode == 'stock':
im = ax.imshow(self.corrmatrix, vmin=-1, vmax=1)
fig.colorbar(im, fraction=0.046, pad=0.04)
ax.set_xlabel('Companies')
ax.set_ylabel('Companies')
return fig
elif mode == 'category':
im = ax.imshow(self.categ_corrmatrix, vmin=0, vmax=1)
ax.set_xticks(range(len(self.categories)))
ax.set_yticks(range(len(self.categories)))
ax.set_xticklabels(self.categories, rotation=45,
horizontalalignment='right', fontsize=8)
ax.set_yticklabels(self.categories, fontsize=8)
# Annotate grid
for i in range(len(self.categories)):
for j in range(len(self.categories)):
ax.text(j, i, round(self.categ_corrmatrix[i, j], 2),
ha="center", va="center", color="k", fontsize=8)
fig.colorbar(im, fraction=0.046, pad=0.04)
return fig
def draw_network(self, colordict=colordict):
"""Draw the stock correlation network in the kamada kawai layout"""
node_color = []
for category in self.companies['Category'].to_list():
node_color.append(colordict[category])
edge_color = [self.stocknetwork[u][v]['weight']
for u, v in self.stocknetwork.edges]
fig, ax = plt.subplots(dpi=200, figsize=(8, 6))
pos = nx.kamada_kawai_layout(self.stocknetwork)
# Draw nodes with colordict
nodes = nx.draw_networkx_nodes(self.stocknetwork, pos,
node_color=node_color,
node_size=10, ax=ax)
# Draw edges with colormap
edges = nx.draw_networkx_edges(self.stocknetwork, pos,
edge_color=edge_color,
edge_cmap=plt.cm.viridis,
alpha=0.7, ax=ax)
# Create legend
handles = []
for key, value in colordict.items():
handles.append(Line2D([0], [0], marker='o', color='w', label=key,
markerfacecolor=value, markersize=7.5))
fig.colorbar(edges, fraction=0.025, pad=0.01)
ax.set_axis_off()
ax.legend(handles=handles, ncol=2, fontsize=7.5)
return fig
if __name__ == "__main__":
# Load generated stock data from csv file and create class
print('Loading stock data...')
infodf = pd.read_csv("data/S&P500_info.csv")
companies = infodf[['Symbol', 'Security', 'GICS Sector']].values.tolist()
sp500 = StockCorr(companies)
sp500.gen_pricehistdict()
# Calculate and plot the cross-correlation matrix
print('Calculating stock cross-correlation matrix...')
start = time()
sp500.calc_corrmatrix()
end = time()
print(f'Time elapsed: {round(end - start, 2)}s')
fig1 = sp500.draw_corrmatrix('stock')
fig1.tight_layout()
fig1.savefig("output/stock_corrmatrix")
    # Calculate and plot the category cross-correlation matrix
print('Calculating category cross-correlation matrix...')
start = time()
sp500.calc_categ_corrmatrix()
end = time()
print(f'Time elapsed: {round(end - start, 2)}s')
fig2 = sp500.draw_corrmatrix('category')
fig2.tight_layout()
fig2.savefig("output/categ_corrmatrix")
# Generate the stock correlation network as a minimum spanning tree
print('Generating stock correlation network...')
start = time()
T = sp500.gen_stocknetwork()
end = time()
print(f'Time elapsed: {round(end - start, 2)}s')
# Draw and save the network
print('Drawing graph...')
fig3 = sp500.draw_network()
fig3.tight_layout()
fig3.savefig("output/stocknetwork.png")
total_weight = T.size(weight='weight')
print(f'Normalised Tree Length: {total_weight/T.size()}')
``` |
{
"source": "jkfids/forest-fire",
"score": 3
} |
#### File: jkfids/forest-fire/analysis.py
```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from matplotlib import colors
from matplotlib.animation import FuncAnimation
from time import time
# Import ForestFire class
from forestfire import ForestFire
def animate_forest(forest, interval=100, frames=200, name='forestfire.gif'):
"""Animate a forest fire for a given number of frames (i.e. timesteps)"""
start = time()
cmap = colors.ListedColormap(['red', 'black', 'green'])
bounds = [-1, -0.5, 0.5, 1]
norm = colors.BoundaryNorm(bounds, cmap.N)
    # Create a frameless, full-bleed figure for the animation
    fig = plt.figure(frameon=False)
    fig.set_size_inches(10, 10)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
def init_frame():
ax.imshow(forest.grid, cmap=cmap, norm=norm, aspect='auto')
def animate(i):
plt.cla()
ax.imshow(forest.grid, cmap=cmap, norm=norm, aspect='auto')
forest.step()
#print(f'frame {i}')
anim = FuncAnimation(fig, animate, init_func=init_frame, interval=interval, frames=frames)
anim.save('animations/' + name)
end = time()
print(f'Time elapsed: {round((end - start), 2)} seconds')
def plot_fractionvt(forest, t_max, plot_green=True):
"""Plot fraction of green and red vs t"""
fig, ax = plt.subplots()
ax.set_xlabel('Time')
ax.set_ylabel('Grid State Fractions')
ax.grid(True)
props = dict(boxstyle='square', facecolor='white')
textbox = (
f'L = {forest.height}\n'
f'p = {forest.p}\n'
f'f = {forest.f}'
)
ax.text(0.865, 0.965, textbox, transform=ax.transAxes, fontsize=9,
verticalalignment='top', bbox=props)
forest.step(t_max)
y1 = np.array(forest.s_history)/forest.size
x = range(len(y1))
ax.plot(x, y1, color='red')
if plot_green:
y2 = np.array(forest.g_history)/forest.size
ax.plot(x, y2, color='green')
def gaussian(x, mu, sigma):
return (1/(sigma*np.sqrt(2*np.pi)))*np.exp(-(x-mu)**2/(2*sigma**2))
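# Note: this Gaussian is the normalised pdf (unit area), so only the mean and the
# standard deviation are fitted; it is paired with density=True histograms below.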
def plot_firesizepd(forest, t, N, p0=(7500, 1000), fit=False):
    """Construct a histogram of probability vs. fire size after a burn-in time t."""
    start = time()
    forest.step(t+N)
    firesizes = forest.s_history[t:]
    # Plot the normalised fire-size histogram
    bin_heights, bin_borders, _ = plt.hist(firesizes, density=True, bins='auto')
    popt = None
    if fit:
        # Fit the normalised Gaussian to the bin centres; p0 = (mean, std dev)
        bin_centers = bin_borders[:-1] + np.diff(bin_borders)/2
        popt, _ = curve_fit(gaussian, bin_centers, bin_heights, p0=p0)
        X = np.linspace(bin_borders[0], bin_borders[-1], 10000)
        plt.plot(X, gaussian(X, *popt))
    plt.ylabel('Probability')
    plt.xlabel('Fire Size')
    plt.title('Fire Size Probability Distribution')
    end = time()
    print(f'Time elapsed: {round((end - start), 2)} seconds')
    if fit:
        print(f'Mean = {popt[0]}')
        print(f'Standard deviation = {popt[1]}')
        return popt
# Fire size pdf subplots
def plot_firesizepd_multi(forest1, forest2, forest3, t, N):
"""Plot multiple fire size probability distributions"""
start = time()
forest1.step(t[0]+N)
forest2.step(t[1]+N)
forest3.step(t[2]+N)
firesizes_history1 = forest1.s_history[t[0]:]
    firesizes_history2 = forest2.s_history[t[1]:]
    firesizes_history3 = forest3.s_history[t[2]:]
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 4), dpi=144)
ax1.title.set_text(f'f = {forest1.f}, p = {forest1.p}')
ax2.title.set_text(f'f = {forest2.f}, p = {forest2.p}')
ax3.title.set_text(f'f = {forest3.f}, p = {forest3.p}')
#ax1.set_ylabel('Probability')
#fig.text(0.5, 0.05, 'Total Fire Size', ha='center')
ax1.set_ylim(top=0.00675)
ax3.set_ylim(top=0.00675)
#weights1 = np.ones(len(firesizes_history1))/len(firesizes_history1)
weights2 = np.ones(len(firesizes_history2))/len(firesizes_history2)
#weights3 = np.ones(len(firesizes_history3))/len(firesizes_history3)
bin_heights1, bin_borders1, _ = ax1.hist(firesizes_history1, density=True, bins='auto')
#bin_heights1, bin_borders1, _ = ax1.hist(firesizes_history1, weights=weights1, bins=100)
ax2.hist(firesizes_history2, weights=weights2, bins=100)
bin_heights3, bin_borders3, _ = ax3.hist(firesizes_history3, density=True, bins='auto')
#bin_heights3, bin_borders3, _ = ax3.hist(firesizes_history3, weights=weights3, bins=100)
bin_centers1 = bin_borders1[:-1] + np.diff(bin_borders1)/2
popt1, _ = curve_fit(gaussian, bin_centers1, bin_heights1, p0 = [7500, 100])
X1 = np.linspace(bin_borders1[0], bin_borders1[-1], 10000)
ax1.plot(X1, gaussian(X1, *popt1), label=f'μ = {round(popt1[0])}, σ = {round(popt1[1], 2)}')
ax1.legend(loc='upper center')
bin_centers3 = bin_borders3[:-1] + np.diff(bin_borders3)/2
popt3, _ = curve_fit(gaussian, bin_centers3, bin_heights3, p0 = [250, 50])
X3 = np.linspace(bin_borders3[0], bin_borders3[-1], 10000)
ax3.plot(X3, gaussian(X3, *popt3), label=f'μ = {round(popt3[0])}, σ = {round(popt3[1], 2)}')
ax3.legend(loc='upper center')
end = time()
fig.savefig('plots/' + 'firesizepds')
print(f'Time elapsed: {round((end - start), 2)} seconds')
return popt1, popt3
def plot_waitingtimespd_multi(forest1, forest2, forest3, t, N):
"""
Multiple plots of the probability distribution for waiting times
between fires in individual sites
"""
start = time()
forest1.step(t+N)
forest2.step(t+N)
forest3.step(t+N)
w_history1 = forest1.w_history[-100000:]
w_history2 = forest2.w_history
w_history3 = forest3.w_history[-500000:]
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 4), dpi=144)
ax1.title.set_text(f'f = {forest1.f}, p = {forest1.p}')
ax2.title.set_text(f'f = {forest2.f}, p = {forest2.p}')
ax3.title.set_text(f'f = {forest3.f}, p = {forest3.p}')
ax1.xaxis.set_ticks(range(2, 13, 1))
ax1.set_xlim(left=2, right=12)
ax2.set_xlim(right=60)
ax3.set_xlim(right=600)
weights1 = np.ones(len(w_history1))/len(w_history1)
ax1.hist(w_history1, weights=weights1, bins=100)
ax2.hist(w_history2, density=True, bins='auto')
ax3.hist(w_history3, density=True, bins='auto')
end = time()
fig.savefig('plots/' + 'waitingtimespds')
print(f'Time elapsed: {round((end - start), 2)} seconds')
def calc_steadystate(f, p):
"""Calculate steady state fractions"""
fp1 = f*(p + 1)
root = np.sqrt(fp1**2 + 10*p*fp1 + 9*p**2)
#root = np.sqrt((fp1 + 9*p)*(fp1 + p))
x_r = (3*p - fp1 + root)/(8*(p + 1))
#x_g = (5*p + fp1 - root)/(8*p)
x_g = 1 - (p + 1)*x_r/p
return x_r, x_g
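# Illustrative check, not part of the original script: the returned fractions
# satisfy x_g + (p + 1)*x_r/p = 1 by construction, which gives a quick sanity test.
if __name__ == "__main__":
    x_r_demo, x_g_demo = calc_steadystate(f=0.0001, p=0.01)
    print(f'x_r = {x_r_demo:.4f}, x_g = {x_g_demo:.4f}, '
          f'check = {x_g_demo + (0.01 + 1)*x_r_demo/0.01:.4f}')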
``` |
{
"source": "jkfids/PINN-burgers",
"score": 3
} |
#### File: PINN-burgers/burgers1d/analytical.py
```python
import numpy as np
import pandas as pd
from scipy import integrate
from matplotlib import pyplot as plt
# Initialize matrices/constants
vis = .01/np.pi # Viscosity term in Burgers' equation
n_x = 201 # Number of u(x, t) elements on x
X = np.zeros(n_x)
Y1 = np.zeros(n_x)
Y2 = np.zeros(n_x)
Y3 = np.zeros(n_x)
Y_ani = np.zeros(n_x)
# Analytical solution for Burger's equation with periodic boundary conditions
# at x = [-1,1] and initial condition u(x,0) = -sin(pi*x)
def f_cole(y):
return np.exp(-np.cos(np.pi*y)/(2*np.pi*vis))
def integrand1(eta, x, t):
return np.sin(np.pi*(x-eta))*f_cole(x-eta)*np.exp(-eta**2/(4*vis*t))
def integrand2(eta, x, t):
return f_cole(x-eta)*np.exp(-eta**2/(4*vis*t))
def u_anal(x, t):
if t == 0:
return -np.sin(np.pi*x)
else:
I1 = integrate.quad(integrand1, -np.inf, np.inf, args=(x,t))[0]
I2 = integrate.quad(integrand2, -np.inf, np.inf, args=(x,t))[0]
return -I1/I2
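# Note: u_anal evaluates the Cole-Hopf closed-form solution by numerical quadrature
# (f_cole is the transformed initial condition), so each call returns a single float;
# plot_anal below simply evaluates it over a grid of x values.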
# Plot u(x,t) for t1, t2, t3 over the space of X = [-1,1]
def plot_anal(t1=0, t2=0.25, t3=0.5, n_x = 201):
X = np.linspace(-1, 1, n_x)
for i in range(n_x):
Y1[i] = u_anal(X[i], t1)
Y2[i] = u_anal(X[i], t2)
Y3[i] = u_anal(X[i], t3)
fig_static = plt.figure('static')
plt.plot(X, Y1, label = f't = {t1}', linewidth=1)
plt.plot(X, Y2, label = f't = {t2}', linewidth=1)
plt.plot(X, Y3, label = f't = {t3}', linewidth=1)
plt.ylim(-1.25,1.25)
plt.legend()
plt.xlabel('x')
plt.ylabel('u(x,t)')
plt.title("Analytical Solution for Burgers' Equation in 1D")
fig_static.savefig("output/analplot1d.png")
    plt.close(fig_static)
# Generate training data for PINN and save it to a csv file
def gen_train(max_t, set_size=100):
m = int(set_size/2)
tr_init = np.zeros([m,3])
tr_bound = np.zeros([m,3])
tr_init[:,1] = np.linspace(-1, 1, m)
tr_bound[:,1] = np.ones(m) - 2*np.random.randint(2, size=m)
tr_bound[:,2] = np.linspace(0, max_t, m)
for i in range(m):
tr_init[i][0] = u_anal(tr_init[i][1], 0)
tr_bound[i][0] = u_anal(tr_bound[i][1], tr_bound[i][2])
training = np.append(tr_init, tr_bound, axis=0)
df = pd.DataFrame(training, columns=['u(x,t)', 'x', 't'])
df.to_csv('data/train1d.csv', index=False)
``` |
{
"source": "jkfids/unimelb-projects",
"score": 4
} |
#### File: unimelb-projects/Abelian Sandpiles/sandpile.py
```python
import numpy as np
import math as m
class SandPile:
"""SandPile class
"""
def __init__(self, width, height, threshold=4):
"""Initialize a sandpile with the specified width and height."""
self.width = width
self.height = height
self.threshold = threshold
self.grid = np.zeros((width, height), dtype=int)
# Initializes a toppled grid which will be used to represent which
# sites have been affected by an avalanche
self.toppled_grid = np.zeros((width, height), dtype=int)
# Initialize various properties such as histories for avalanche
# properties, mass history and time
self.time = 0;
self.mass_history = [self.mass()]
self.grain_loss = 0
self.grain_loss_history = []
self.grain_loss_total = 0
self.topples_history = []
self.area_toppled_history = []
self.M_length_history = []
self.E_length_history = []
def drop_sand(self, n=1, site=None):
""""Add `n` grains of sand to the grid. Each grains of sand is added to
a random site.
This function also increments the time by 1 and update the internal
`mass_history`. Depending on how you want to code things, you may wish
to also run the avalanche (alternatively, the avalanching might be
executed elsewhere).
Parameters
==========
n: int
The number of grains of sand of drop at this time step. If left
unspecified, defaults to 1.
site:
The site on which the grain(s) of sand should be dropped. If `None`,
a random site is used."""
        # Generates a random site when none is given
        if site is None:
x = np.random.randint(low=0, high=self.width);
y = np.random.randint(low=0, high=self.height);
# Assumes that the site is given by 1x2 array where the elements are
# the row and column number respectively
else:
x = site[0];
y = site[1];
# Increments the site by the number of sand grains inputted
self.grid[x,y] = self.grid[x,y] + n;
# Avalanche is run so that all unstable sites are stabilized
self.avalanche();
# Appends the length histories of the avalanche using our length functions
# Also increments time and appends the mass history
self.M_length_history.append(self.M_length_max(x,y,self.toppled_grid))
self.E_length_history.append(self.E_length_max(x,y,self.toppled_grid))
self.time = self.time + 1;
self.mass_history.append(self.mass());
def mass(self):
"""Return the mass of the grid using a double sum function"""
return sum(sum(self.grid));
def area_toppled(self):
"""Return the area toppled given the toppled_grid using a double sum"""
return sum(sum(self.toppled_grid));
def M_length(self, x1, y1, x2, y2):
"""Return the Manhatten distance between sites [x,y] and [i,j]"""
return abs(x1-x2) + abs(y1-y2);
def E_length(self, x1, y1, x2, y2):
"""Return the Euclidean distance between sites [x,y] and [i,j]"""
return m.sqrt((abs(x1-x2)**2)+(abs(y1-y2))**2);
def M_length_max(self, x, y, toppled_grid):
"""Return the M distance between the furthest affected site due to an
avalanche and the grain drop site that caused the avalanche"""
length_max = 0;
# Uses a double for loop to check the distance of each affected point
# to the origin site and checks if it is the highest length
for i in range(self.width):
for j in range(self.height):
if toppled_grid[i,j] == 1:
length = self.M_length(x,y,i,j);
if length_max < length:
length_max = length;
return length_max;
def E_length_max(self, x, y, toppled_grid):
"""Return the E distance between the furthest affected site due to an
avalanche and the grain drop site that caused the avalanche"""
length_max = 0;
        # Similar to M_length_max()
for i in range(self.width):
for j in range(self.height):
if toppled_grid[i,j] == 1:
length = self.E_length(x,y,i,j);
if length_max < length:
length_max = length;
return length_max;
def topple(self, site):
"""Topple the specified site."""
# Uses same site definition as in drop_sand()
x = site[0];
y = site[1];
# Decreases the unstable site element by 4 and changes the respective
# toppled_grid element to 1
self.grid[x,y] = self.grid[x,y] - 4;
self.toppled_grid[x,y] = 1;
# Uses 4 if statements to increment each of the four adjacent sites
# by 1 and adjusts the respective grid elemenent. However if the site
# is not within the bounds of the grid then the grain loss count is
# incremented by 1.
if x < (self.width-1):
self.grid[x+1,y] = self.grid[x+1,y] + 1;
self.toppled_grid[x+1,y] = 1;
else: self.grain_loss = self.grain_loss + 1;
if x > 0:
self.grid[x-1,y] = self.grid[x-1,y] + 1;
self.toppled_grid[x-1,y] = 1;
else: self.grain_loss = self.grain_loss + 1;
if y < (self.height-1):
self.grid[x,y+1] = self.grid[x,y+1] + 1;
self.toppled_grid[x,y+1] = 1;
else: self.grain_loss = self.grain_loss + 1;
if y > 0:
self.grid[x,y-1] = self.grid[x,y-1] + 1;
self.toppled_grid[x,y-1] = 1;
else: self.grain_loss = self.grain_loss + 1;
def avalanche(self):
"""Run the avalanche causing all sites to topple and store the stats of
the avalanche in the appropriate variables.
"""
# Initializes avalanche properties that are recorded after the sandpile
# is stabilized
topples = 0;
self.grain_loss=0;
self.toppled_grid = np.zeros((self.width, self.height), dtype=int)
# While there are any unstable sites, the function uses a double for
# loop to scan through the entire grid and topple every unstable site
# Also increments the topples count each time a toppling occurs
while np.any(self.grid >= 4):
for i in range(self.width):
for j in range(self.height):
if self.grid[i,j] >= 4:
self.topple([i,j]);
topples = topples + 1;
# Appends various avalanche properties to the appropriate array histories
self.topples_history.append(topples);
self.grain_loss_history.append(self.grain_loss);
self.grain_loss_total = self.grain_loss_total + self.grain_loss;
self.area_toppled_history.append(self.area_toppled())
```
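A brief driver sketch for the class above (not part of the original file): it drops grains one at a time and inspects the recorded histories.
```python
from sandpile import SandPile

pile = SandPile(20, 20)          # 20x20 grid, threshold 4
for _ in range(5000):
    pile.drop_sand()             # one grain on a random site, avalanching as needed
print("final mass:", pile.mass())
print("largest avalanche:", max(pile.topples_history))
print("grains lost off the edges:", pile.grain_loss_total)
```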
#### File: unimelb-projects/American Roulette/roulette.py
```python
import numpy as np
from random import random
from matplotlib import pyplot as plt
from time import time
from numpy.polynomial import Polynomial
f = 0.05
q = 9/19
tau = 60
x_0 = 1000
x_w = 2000
x_m = 10
class Roulette:
def __init__(self, x_0=x_0, x_w=x_w, x_m=x_m, f=f, q=q, tau=tau):
self.f = f
self.q = q
self.tau = tau
self.x = x_0
self.x_w = x_w
self.x_m = x_m
self.t = 0
def play(self):
rand = random()
if rand < self.q:
self.x += self.f*self.x
elif rand > self.q:
self.x -= self.f*self.x
self.t += self.tau
def start(self):
while (self.x > self.x_m) & (self.x < self.x_w):
self.play()
def plot_t_pdf(N, x_0=x_0, x_w=x_w, x_m=x_m, f=f, q=q, tau=tau):
start = time()
fig, ax = plt.subplots(dpi=144)
t_history = np.zeros(N, dtype=np.int32)
for i in range(N):
        andrew = Roulette(x_0=x_0, x_w=x_w, x_m=x_m, f=f, q=q, tau=tau)
andrew.start()
t_history[i] = andrew.t
t_mean = np.mean(t_history)
bin_heights, bin_borders, _ = ax.hist(t_history, density=True, bins='auto')
bin_centers = bin_borders[:-1] + np.diff(bin_borders)/2
ax.set_xlabel('Time (s)')
ax.set_ylabel('Probability')
end = time()
fig.savefig('playtime2_pdf')
print(f'Time elapsed: {round(end-start, 2)}s')
return t_mean, t_history, bin_centers, bin_heights
t_mean, t_history, bin_centers, bin_heights = plot_t_pdf(100000)
print(f'Average playing time: {round(t_mean)}s')
```
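A small Monte-Carlo sketch built on the class above (an illustration, not part of the original script): it estimates how often the player reaches the winning balance before going broke. Note that importing `roulette` also runs the module-level `plot_t_pdf` call in the original file, so one might prefer to copy the class into a separate module first.
```python
from roulette import Roulette  # caution: the original module also runs a long simulation on import

N = 10000
wins = 0
for _ in range(N):
    game = Roulette()
    game.start()                 # play until x <= x_m or x >= x_w
    if game.x >= game.x_w:
        wins += 1
print(f"estimated P(win) approx {wins / N:.3f}")
```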
#### File: unimelb-projects/N-Body Simulation/nbody.py
```python
import numpy as np
# 'numpy' is a Python module, which provides a library of
# numerical functions and structures for use in Python programs
# In order to keep the numbers manageable, the simulation works in the rescaled units defined below
m_unit = 2.e30 # Mass unit in kg
l_unit = 1.496e11 # Length unit in m
t_unit = 86400. # Time unit in s, i.e. the number of seconds in one day
G = 6.67e-11*m_unit*(t_unit**2)/(l_unit**3) # The gravitational constant in rescaled units
# We define constants at the start of the program
# It is good programming practice to avoid using "magic numbers"
# in your code, in case you decide later that you want to change
# the value of one of a particular constant.
# You WILL need to change the values of these constants
n_body = 4 # Number of bodies in the simulation
max_t = 50000 # Total simulation time
delta_t = 1 # Length of time step (in time units)
max_step = int(max_t/delta_t) # Maximum number of time steps
show_step = 10 # Show every nth step
# We will use a Python structure called a 'class' to define the bodies
# The class will contain the mass, positions and velocities of every
# object
class body:
def __init__(self, name, m, x, y, vx, vy, A):
self.name = name
self.mass = m
self.x = x
self.y = y
self.vx = vx
self.vy = vy
self.A = A # Albedo
self.orbits = None # Number of orbits around the Sun
self.temp = None # Surface temperature of the planet
self.phi = None # Current angle of orbit around the Sun
self.phi_i = None # Angle of orbit before the Euler update
self.phi_o = None # Original angle at time = 0
# we will also add a function to the class that displays the
# body's parameters
def show(self):
print("Body: %s"%self.name)
print("Mass = %6.2e mass units = %6.2e kg"%(self.mass, self.mass*m_unit))
print("Position = (%6.2e,%6.2e) length units"%(self.x, self.y))
print("Velocity = (%6.2e,%6.2e) velocity units"%(self.vx,self.vy))
print("Kinetic Energy = %6.2e energy units"%(self.K_energy()))
if self.temp != None:
print("Surface Temperature =", int(self.temp), "K")
if self.orbits != None:
print("Number of Orbits:", int(self.orbits))
print("")
# Calculates the distance between self and another body
def r(self, body):
return np.sqrt((self.x - body.x)**2 + (self.y - body.y)**2)
# Calculates the kinetic energy of the body
def K_energy(self):
return 0.5*self.mass*(self.vx**2 + self.vy**2)
# Calculates the gravitational potential energy between self and another body
def V_energy(self, body):
return -G*self.mass*body.mass/self.r(body)
# Calculates the surface temperature of a Planet
def calc_temp(self, Sun):
return 279*((1-self.A)**0.25)*(self.r(Sun)**-0.5)
# Now that we have defined the global variables and the body class
# we are now ready to run the simulation
# We will now define the initial conditions of the bodies and create
# them as an object (using the 'body' class) and add them to the
# 'bodies' array
# define the initial conditions of the Sun
name1 = 'Sun'
mass1 = 1
x1 = 50
y1 = 0.
vx1 = 0
vy1 = np.sqrt(G*50/x1)
A1 = None
# define the initial conditions of the Earth
name2 = 'Earth'
mass2 = 3.e-6
x2 = x1 + 1.
y2 = y1 + 0.
vx2 = vx1 + 0.
vy2 = vy1 + np.sqrt(G*mass1/(x2-x1))
A2 = 0.4
# define the initial conditions of Jupiter
name3 = 'Jupiter'
mass3 = 1/1047
x3 = x1 + 5.2
y3 = y1 + 0.
vx3 = vx1 + 0.
vy3 = vy1 + np.sqrt(G*mass1/(x3-x1))
A3 = 0.51
# define the initial conditions of the Moon
"""name4 = 'Moon'
mass4 = 0.0123 * mass2
x4 = x1 + x2 + 2.569e-3
y4 = y1 + y2 + 0.
vx4 = vx1 + vx2 + 0.
vy4 = vy1 + vy2 + np.sqrt(G*mass2/(x4-x1-x2))"""
# define the initial conditions of the black hole
name4 = 'Black Hole'
mass4 = 50
x4 = 0
y4 = 0
vx4 = 0
vy4 = 0
A4 = None
# Update the initial velocity of the Sun so the net linear momentum of the solar system = 0
vx1 -= ((vx2-vx1)*mass2 + (vx3-vx1)*mass3)/mass1
vy1 -= ((vy2-vy1)*mass2 + (vy3-vy1)*mass3)/mass1
# Does the same for the blackhole
vx4 -= vx1*mass1/mass4
vy4 -= vy1*mass1/mass4
# Define an array that will contain the body classes
# For now, the array is empty, but we will add bodies to it as needed
bodies = np.array([])
# Add the first body to the 'bodies' array
bodies = np.append(bodies, body(name1, mass1, x1, y1, vx1, vy1, A1))
bodies[0].temp = 5778 # Manually define the surface temperature of the Sun
# and the second
bodies = np.append(bodies, body(name2, mass2, x2, y2, vx2, vy2, A2))
# and the third and so on
bodies = np.append(bodies, body(name3, mass3, x3, y3, vx3, vy3, A3))
bodies = np.append(bodies, body(name4, mass4, x4, y4, vx4, vy4, A4))
# Returns the polar components of the vector between 2 Cartesian coordinates
def cart2pol(x1, y1, x2, y2):
dx = x2 - x1
dy = y2 - y1
rho = np.sqrt(dx**2 + dy**2)
phi = np.arctan2(dy, dx)
return(rho, phi)
# Returns the total energies of the system
def system_energy(n_body=n_body, bodies=bodies):
kinetic = 0.
potential = 0.
# Utilizes a for loop to sum up the kinetic energies and a double for loop
# and an if statement to sum the potential energy between each unique pair
for i in range(0, n_body):
kinetic += bodies[i].K_energy()
for j in range(0, n_body):
if j > i:
potential += bodies[i].V_energy(bodies[j])
return (kinetic + potential, kinetic, potential)
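# Note: the integration loop below uses a simple Euler-type update, so comparing
# system_energy() at later steps against the initial_energies recorded below gives
# a quick measure of the numerical energy drift.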
# Initialize system/planetary properties and histories
temp_history = np.array([])
initial_energies = system_energy()
for i in range(1, n_body-1):
bodies[i].orbits = 0.
bodies[i].temp = bodies[i].calc_temp(bodies[0])
bodies[i].phi = cart2pol(bodies[0].x, bodies[0].y, bodies[i].x, bodies[i].y)[1]
bodies[i].phi_o = bodies[i].phi
bodies[i].phi_i = bodies[i].phi
temp_history = np.append(temp_history, bodies[1].temp)
# The next part of this script is a loop -- it means that the
# piece of indented code will be executed a number of times,
# as the variable i takes on values from 1 to n_body.
# Initialize and print system properties
print ("\033[4mInitial Properties\033[0m")
for i in range(0, n_body):
bodies[i].show() # Display name mass, position and velocity (as per the function 'show()' in the class 'body')
print("System:")
print("Total Kinetic = %6.3e energy units"%initial_energies[1])
print("Total Potential = %6.3e energy units"%initial_energies[2])
print("Total Energy = %6.3e energy units"%initial_energies[0])
print("")
# Open a file for each body that we will write the output to
# By using the open() function with the "w" (write) argument, a new
# file will be created and opened at the path of the first argument
f = open("outfile.csv", "w")
f.write(", ".join(["Xpos body%i, Ypos body%i"%(i+1, i+1) for i in range(n_body)])+", step, time,\n")
# We are now ready to run the n-body simulation
time = 0.
for step in range(0,max_step):
# For each step, we want to record the acceleration/force between each pair of bodies
# We create the two-dimensional array and populate it with zeros to begin
# We want to calculate this array every step, so we create it inside the loop
aFactor = np.zeros([n_body, n_body])
# Work out all the separations and determine the acceleration factor
for i in range(0,n_body):
for j in range(0,n_body):
# The "if" statements asks the computer to make a comparison
# between the current values of i and j. "!=" means not
# equal to, as we do not want to calculate a self force.
if i != j:
xsq = (bodies[i].x - bodies[j].x)**2.
ysq = (bodies[i].y - bodies[j].y)**2.
rsq = xsq + ysq
factor = rsq**-1.5
# update the acceleration factor array
aFactor[i][j] = G*bodies[j].mass*factor
aFactor[j][i] = G*bodies[i].mass*factor
# Note that when we set up the class, we do not initialize the acceleration of
# each body. So now we add a new feature to each of the bodies - the acceleration.
for i in range(0,n_body):
bodies[i].ax = 0. # Set the accelerations to 0
bodies[i].ay = 0.
# And update the acceleration for each pair of bodies
for i in range(0,n_body):
for j in range(0,n_body):
if i != j:
# For each body, calculate the acceleration vector components
bodies[i].ax -= aFactor[i][j] * (bodies[i].x - bodies[j].x)
bodies[i].ay -= aFactor[i][j] * (bodies[i].y - bodies[j].y)
# Save the initial phi values of the planets' orbit
for i in range(0, n_body):
if bodies[i].orbits != None:
bodies[i].phi_i = cart2pol(bodies[0].x, bodies[0].y, bodies[i].x, bodies[i].y)[1]
# We now update the position and velocity values for each body
for i in range(0,n_body):
# For each body, calculate the new velocity vector components
        bodies[i].vx += bodies[i].ax * delta_t
        bodies[i].vy += bodies[i].ay * delta_t
        # and the new position vector components
        bodies[i].x += bodies[i].vx * delta_t
        bodies[i].y += bodies[i].vy * delta_t
# Update the phi component of the planets' orbital path then checks whether
# the body has completed an orbit
# See labnotes for more details
for i in range(0,n_body):
if bodies[i].orbits != None:
bodies[i].phi = cart2pol(bodies[0].x, bodies[0].y, bodies[i].x, bodies[i].y)[1]
if bodies[i].phi_o < bodies[i].phi and bodies[i].phi_o > bodies[i].phi_i:
bodies[i].orbits += 1
# Update the planetary temperatures and append it to the temperature history
for i in range(0,n_body):
if bodies[i].A != None:
bodies[i].temp = bodies[i].calc_temp(bodies[0])
temp_history = np.append(temp_history, bodies[1].temp)
# Update the system energies
energies = system_energy()
    # We don't want to write out data every step, just every show_step
    # steps -- this keeps the output file small enough to inspect easily.
if (step%show_step)==0:
for i in range(0,n_body):
            ## Write out the x and y position of each body
            f.write("%0.10f,%0.10f,"%(bodies[i].x, bodies[i].y))
f.write("%5d,%6.4f\n"%(step, time))
# update the current time
time+=delta_t
# Once the loop is finished, close the file so we can read it.
f.close()
# plot
# I also added legends and markers for the final positions of the bodies.
import matplotlib.pyplot as plt
plt.figure(figsize=(6,6), dpi=70)
plt.axis('equal') # Makes the scale of the two axis equal, so circles appear as circles
f = open("outfile.csv")
k = f.readlines()
f.close()
x_body1 = [float(i.split(',')[0]) for i in k[1:]]
y_body1 = [float(i.split(',')[1]) for i in k[1:]]
x_body2 = [float(i.split(',')[2]) for i in k[1:]]
y_body2 = [float(i.split(',')[3]) for i in k[1:]]
x_body3 = [float(i.split(',')[4]) for i in k[1:]]
y_body3 = [float(i.split(',')[5]) for i in k[1:]]
x_body4 = [float(i.split(',')[6]) for i in k[1:]]
y_body4 = [float(i.split(',')[7]) for i in k[1:]]
plt.plot(x_body1, y_body1, c='y')
plt.plot((x_body1[len(x_body1)-1]),(y_body1[len(y_body1)-1]), marker='o', c='y', label='Sun')
plt.plot(x_body2, y_body2)
plt.plot((x_body2[len(x_body2)-1]),(y_body2[len(y_body2)-1]), marker='o', c='#1f77b4', label='Earth')
plt.plot(x_body3, y_body3)
plt.plot((x_body3[len(x_body3)-1]),(y_body3[len(y_body3)-1]), marker='o', c='#ff7f0e', label='Jupiter')
plt.plot(x_body4, y_body4, c='black')
plt.plot((x_body4[len(x_body4)-1]),(y_body4[len(y_body4)-1]), marker='o', c='black', label='Black Hole')
plt.title('Black Hole vs Solar System - 50000 days (Δt = 1)')
plt.legend()
plt.show()
# Plot the surface temperature of Earth over the simulation
t_history = delta_t*np.array([i for i in range(len(temp_history))])
plt.plot(t_history, temp_history)
plt.title("Surface Temperature of Earth over Time")
plt.xlabel("Time (day)")
plt.ylabel("Temperature (K)")
plt.show()
# Prints properties of the system after simulation
print ("\033[4mFinal Properties\033[0m")
for i in range(0, n_body):
    bodies[i].show() # Display name, mass, position and velocity (as per the function 'show()' in the class 'body')
print("System:")
print("Total Kinetic = %6.3e energy units"%energies[1])
print("Total Potential = %6.3e energy units"%energies[2])
print("Total Energy = %6.3e energy units"%energies[0])
``` |
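The heart of the listing above is an O(N²) pairwise acceleration sum followed by a velocity-then-position update, which makes the integrator a semi-implicit (symplectic) Euler scheme. The following standalone sketch shows the same scheme in isolation; it is not taken from the script, and the `Body` dataclass, the unit system (AU, solar masses, days) and the two-body example values are illustrative assumptions.
```python
# Minimal sketch of the script's integration scheme (illustrative, not the original code).
from dataclasses import dataclass
import numpy as np

@dataclass
class Body:
    mass: float
    pos: np.ndarray  # shape (2,)
    vel: np.ndarray  # shape (2,)

def step(bodies, G, dt):
    """Advance every body by one velocity-then-position (semi-implicit Euler) step."""
    # Pairwise accelerations: a_i -= G * m_j * (r_i - r_j) / |r_i - r_j|**3
    acc = [np.zeros(2) for _ in bodies]
    for i, bi in enumerate(bodies):
        for j, bj in enumerate(bodies):
            if i == j:
                continue  # no self-force, mirroring the "if i != j" test above
            dr = bi.pos - bj.pos
            acc[i] -= G * bj.mass * dr / np.dot(dr, dr) ** 1.5
    for b, a in zip(bodies, acc):
        b.vel += a * dt     # velocity first ...
        b.pos += b.vel * dt  # ... then position, exactly as in the main loop above

# Example: a Sun-Earth pair in AU / solar-mass / day units (assumed values).
G, dt = 2.959e-4, 1.0
sun = Body(1.0, np.array([0.0, 0.0]), np.array([0.0, 0.0]))
earth = Body(3.0e-6, np.array([1.0, 0.0]), np.array([0.0, np.sqrt(G)]))
for _ in range(365):
    step([sun, earth], G, dt)
```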
{
"source": "jkfindeisen/pylibnidaqmx",
"score": 3
} |
#### File: examples/contrib/alternate_on_off_slow.py
```python
from __future__ import division
from numpy import *
import labdaq.daqmx as daqmx
import labdaq.daq as daq
import threading,time
def min2sec(minutes):
    return minutes*60.0
def sec2min(seconds):
    return seconds/60.0
###### setup script parameters ########
long_duration = min2sec(1.0) # one minute
duration = 25.0 # sec
onvoltage = 5.0
offvoltage = 0.0
onstate = False
gCurrentTimer = None
state_verbal= {True:'On', False: 'Off'}
def change_voltage(onstate):
print onstate
if onstate:
daq.set_voltage_ch0(onvoltage)
else :
daq.set_voltage_ch0(offvoltage)
def cycle(onstate=onstate):
print "hi! Starting up loop of alternating on voltage %f with off voltage of %f every %f seconds or %f minutes" % (onvoltage, offvoltage, duration, sec2min(duration))
while 1:
onstate = not onstate
change_voltage(onstate)
time.sleep(duration)
# gCurrentTimer = threading.Timer(duration, cycle, (onstate,))
if __name__=='__main__':
cycle()
``` |
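The commented-out `threading.Timer` line above hints at a non-blocking alternative to the `while 1` loop: each call flips the state, sets the voltage, and schedules the next flip. A rough sketch of that variant follows, assuming the same `labdaq.daq` interface; the module-level constants are repeated only to keep the snippet self-contained.
```python
# Timer-based variant of cycle(); a sketch, assuming the labdaq.daq module used above.
import threading
import labdaq.daq as daq

onvoltage, offvoltage, duration = 5.0, 0.0, 25.0

def timed_cycle(onstate=False):
    onstate = not onstate
    daq.set_voltage_ch0(onvoltage if onstate else offvoltage)
    # Non-daemon timers keep the process alive, mirroring the endless while loop.
    threading.Timer(duration, timed_cycle, (onstate,)).start()

if __name__ == '__main__':
    timed_cycle()
```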
{
"source": "jkfran/wordpress-fran",
"score": 2
} |
#### File: wordpress-fran/tests/helper.py
```python
import requests
def finish_setup(unit, user='admin', password='<PASSWORD>'):
h = {'User-Agent': 'Mozilla/5.0 Gecko/20100101 Firefox/12.0',
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*',
'Accept-Encoding': 'gzip, deflate'}
r = requests.post('http://%s/wp-admin/install.php?step=2' % unit,
headers=h, data={'weblog_title': 'Amulet Test %s' % unit,
'user_name': user, 'admin_password': password,
'admin_email': '<EMAIL>',
'admin_password2': password,
'Submit': 'Install WordPress'}, proxies=None)
r.raise_for_status()
``` |
{
"source": "jkgeo/ckan-remote-dataloader",
"score": 2
} |
#### File: ckan-remote-dataloader/ckanloader/loader.py
```python
import messytables
import itertools
from ckanapi import RemoteCKAN
from datetime import datetime
TYPE_MAPPING = {
'String': 'text',
'Integer': 'numeric',
'Decimal': 'numeric',
'DateUtil': 'timestamp'
}
TYPES = [messytables.StringType, messytables.DecimalType,
messytables.IntegerType, messytables.DateUtilType]
def connect(ckan_url, api_key):
ckan = RemoteCKAN(ckan_url, apikey=api_key)
return ckan
def update_resource_details(ckan, resource_id):
"""
Update webstore_url and webstore_last_updated in CKAN
"""
url_type = 'datastore'
url = f'{ckan.address}/datastore/dump/{resource_id}'
modified = datetime.now().isoformat()
format = 'Table'
ckan.action.resource_update(id=resource_id, url=url, url_type=url_type, last_modified=modified, format=format)
def chunky(iterable, n):
"""
Generates chunks of data that can be loaded into ckan
:param n: Size of each chunks
:type n: int
"""
it = iter(iterable)
item = list(itertools.islice(it, n))
while item:
yield item
item = list(itertools.islice(it, n))
def parse_data(input):
fh = open(input, 'rb')
try:
table_set = messytables.any_tableset(fh)
    except messytables.ReadError as e:
        print(e)
        raise
get_row_set = lambda table_set: table_set.tables.pop()
row_set = get_row_set(table_set)
offset, headers = messytables.headers_guess(row_set.sample)
# Some headers might have been converted from strings to floats and such.
headers = [str(header) for header in headers]
row_set.register_processor(messytables.headers_processor(headers))
row_set.register_processor(messytables.offset_processor(offset + 1))
types = messytables.type_guess(row_set.sample, types=TYPES, strict=True)
row_set.register_processor(messytables.types_processor(types))
headers = [header.strip() for header in headers if header.strip()]
headers_set = set(headers)
def row_iterator():
for row in row_set:
data_row = {}
for index, cell in enumerate(row):
column_name = cell.column.strip()
if column_name not in headers_set:
continue
data_row[column_name] = cell.value
yield data_row
result = row_iterator()
headers_dicts = [dict(id=field[0], type=TYPE_MAPPING[str(field[1])])
for field in zip(headers, types)]
print('Determined headers and types: {headers}'.format(
headers=headers_dicts))
return headers_dicts, result
def update_resource(ckan, input, resource_id):
_, result = parse_data(input)
count = 0
for i, records in enumerate(chunky(result, 250)):
count += len(records)
print('Saving chunk {number}'.format(number=i))
ckan.action.datastore_upsert(resource_id=resource_id, records=records, force=True, method='insert')
print('Successfully pushed {n} entries to "{res_id}".'.format(
n=count, res_id=resource_id))
def new_resource(ckan, existing, input, package_id, name):
if existing:
resource = ckan.action.resource_show(id=package_id)
else:
resource = ckan.action.resource_create(package_id=package_id, name=name)
headers, result = parse_data(input)
count = 0
for i, records in enumerate(chunky(result, 250)):
count += len(records)
print('Saving chunk {number}'.format(number=i))
ckan.action.datastore_create(resource_id=resource['id'], fields=headers, records=records, force=True)
update_resource_details(ckan, resource['id'])
print('Successfully pushed {n} entries to "{res_id}".'.format(
n=count, res_id=resource['id']))
return
``` |
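For context, a typical invocation of the helpers above looks like the following; the CKAN URL, API key, dataset id, resource id, and file paths are placeholders.
```python
from ckanloader import loader

# Connect to a CKAN instance (placeholder URL and key).
ckan = loader.connect('https://ckan.example.org', 'my-api-key')

# Create a new datastore resource from a local CSV and register it in CKAN.
loader.new_resource(ckan, existing=False, input='data/observations.csv',
                    package_id='my-dataset-id', name='Observations')

# Append more rows to an already-created resource.
loader.update_resource(ckan, 'data/observations_update.csv',
                       resource_id='00000000-0000-0000-0000-000000000000')
```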
{
"source": "jkglasbrenner/datamaterials-neighbors",
"score": 3
} |
#### File: datamaterials-neighbors/neighbormodels/neighbors.py
```python
from typing import Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
import pandas as pd
from pandas import Categorical, DataFrame, IntervalIndex, Series
from pandas.core.groupby import DataFrameGroupBy
from pymatgen import PeriodicSite, Structure
from neighbormodels.structure import label_subspecies
Neighbor = Tuple[PeriodicSite, float, int]
SiteNeighbors = List[Optional[Neighbor]]
AllNeighborDistances = List[SiteNeighbors]
NeighborDistances = Dict[str, Union[List[str], List[float], List[int]]]
class NeighborData(NamedTuple):
neighbor_count: DataFrame
sublattice_pairs: DataFrame
structure: Structure
def count_neighbors(cell_structure: Structure, r: float) -> NeighborData:
"""Builds a data frame containing neighbor counts grouped over site-index pairs
and separation distances.
:param cell_structure: A pymatgen ``Structure`` object.
:param r: Radius of sphere.
:return: A named tuple with three field names:
``neighbor_count``
A pandas ``DataFrame`` of neighbor counts aggregated over site-index pairs
and separation distances.
``sublattice_pairs``
A pandas ``DataFrame`` of neighbor distances mapped to unique bin
intervals.
``structure``
A copy of the ``Structure`` object defining the crystal structure.
"""
cell_structure = add_subspecie_labels_if_missing(cell_structure=cell_structure)
neighbor_distances_df: DataFrame = get_neighbor_distances_data_frame(
cell_structure=cell_structure, r=r
)
distance_bins_df: DataFrame = neighbor_distances_df.pipe(
define_bins_to_group_and_sort_by_distance
)
neighbor_count_df: DataFrame = neighbor_distances_df.pipe(
group_site_index_pairs_by_distance, distance_bins_df=distance_bins_df
).pipe(count_neighbors_within_distance_groups).pipe(sort_neighbors_by_site_index_i)
sublattice_pairs_df: pd.DataFrame = neighbor_count_df.pipe(
sort_and_rank_unique_sublattice_pairs
)
return NeighborData(
neighbor_count=neighbor_count_df,
sublattice_pairs=sublattice_pairs_df,
structure=cell_structure,
)
def sort_and_rank_unique_sublattice_pairs(data_frame: DataFrame) -> DataFrame:
"""Group, sort, and rank unique subspecies_ij and distance_bin columns.
:param data_frame: A pandas ``DataFrame`` of pairwise neighbor distances.
:return: A pandas ``DataFrame`` of unique sublattice pairs.
"""
subspecies_columns = ["subspecies_i", "subspecies_j"]
sublattice_columns = subspecies_columns + ["distance_bin"]
return (
data_frame.loc[:, sublattice_columns]
.drop_duplicates(subset=sublattice_columns)
.sort_values(sublattice_columns)
.assign(rank=lambda x: x.groupby(subspecies_columns).cumcount())
.reset_index(drop=True)
)
def sort_neighbors_by_site_index_i(neighbor_count_df: DataFrame) -> DataFrame:
"""Sort by site index i, then neighbor distances, then neighbor index j.
:param neighbor_count_df: A data frame of neighbor counts aggregated over
site-index pairs and separation distances.
:return: A pandas ``DataFrame`` of neighbor counts aggregated over site-index
pairs and separation distances sorted by site index i, then neighbor
distances, then neighbor index j.
"""
return neighbor_count_df.sort_values(by=["i", "distance_bin", "j"]).reset_index(
drop=True
)
def count_neighbors_within_distance_groups(
grouped_distances: DataFrameGroupBy,
) -> DataFrame:
"""Count number of neighbors within each group of same-distance site-index pairs.
:param grouped_distances: A data frame grouped over site-index pairs, subspecies
pairs, and bin intervals.
:return: A pandas ``DataFrame`` of neighbor counts aggregated over site-index pairs
and separation distances.
"""
return (
grouped_distances.apply(
lambda x: pd.to_numeric(arg=x["distance_ij"].count(), downcast="integer")
)
.rename("n")
.reset_index()
)
def group_site_index_pairs_by_distance(
neighbor_distances_df: DataFrame, distance_bins_df: DataFrame
) -> DataFrameGroupBy:
"""Iterate over all sites, grouping by site-index pairs, subspecies pairs, and
bin intervals.
:param neighbor_distances_df: A pandas ``DataFrame`` containing all pairwise
neighbor distances.
:param distance_bins_df: A pandas ``DataFrame`` of neighbor distances mapped to
unique bin intervals.
:return: A data frame grouped over site-index pairs, subspecies pairs, and
bin intervals.
"""
binned_distances: Series = pd.cut(
x=neighbor_distances_df["distance_ij"], bins=distance_bins_df.index
).rename("distance_bin")
return neighbor_distances_df.groupby(
["i", "j", "subspecies_i", "subspecies_j", binned_distances]
)
def define_bins_to_group_and_sort_by_distance(
neighbor_distances_df: DataFrame,
) -> DataFrame:
"""Defines bin intervals to group and sort neighbor pairs by distance.
:param neighbor_distances_df: A pandas ``DataFrame`` of pairwise neighbor
distances.
:return: A pandas ``DataFrame`` of neighbor distances mapped to unique bin
intervals.
"""
unique_distances: np.ndarray = find_unique_distances(
distance_ij=neighbor_distances_df["distance_ij"]
)
bin_intervals: IntervalIndex = define_bin_intervals(
unique_distances=unique_distances
)
return DataFrame(
data={
"distance_bin": Categorical(values=bin_intervals, ordered=True),
"distance_ij": Categorical(values=unique_distances, ordered=True),
},
index=bin_intervals,
)
def find_unique_distances(distance_ij: Series) -> np.ndarray:
"""Finds the unique distances that define the neighbor groups.
:param distance_ij: A pandas ``Series`` of pairwise neighbor distances.
:return: An array of unique neighbor distances.
"""
unique_floats: np.ndarray = np.sort(distance_ij.unique())
next_distance_not_close: np.ndarray = np.logical_not(
np.isclose(unique_floats[1:], unique_floats[:-1])
)
return np.concatenate(
(unique_floats[:1], unique_floats[1:][next_distance_not_close])
)
def define_bin_intervals(unique_distances: np.ndarray) -> IntervalIndex:
"""Constructs bin intervals used to group over neighbor distances.
This binning procedure provides a robust method for grouping data based on a
variable with a float data type.
:param unique_distances: An array of neighbor distances returned by asking
pandas to return the unique distances.
:return: A pandas ``IntervalIndex`` defining bin intervals can be used to sort
and group neighbor distances.
"""
bin_centers: np.ndarray = np.concatenate(([0], unique_distances))
bin_edges: np.ndarray = np.concatenate(
[
bin_centers[:-1] + (bin_centers[1:] - bin_centers[:-1]) / 2,
bin_centers[-1:] + (bin_centers[-1:] - bin_centers[-2:-1]) / 2,
]
)
return IntervalIndex.from_breaks(breaks=bin_edges)
def get_neighbor_distances_data_frame(cell_structure: Structure, r: float) -> DataFrame:
"""Get data frame of pairwise neighbor distances for each atom in the unit cell,
out to a distance ``r``.
:param cell_structure: A pymatgen ``Structure`` object.
:param r: Radius of sphere.
:return: A pandas ``DataFrame`` of pairwise neighbor distances.
"""
all_neighbors: AllNeighborDistances = cell_structure.get_all_neighbors(
r=r, include_index=True
)
neighbor_distances: NeighborDistances = extract_neighbor_distance_data(
cell_structure=cell_structure, all_neighbors=all_neighbors
)
return DataFrame(data=neighbor_distances)
def extract_neighbor_distance_data(
cell_structure: Structure, all_neighbors: AllNeighborDistances
) -> NeighborDistances:
"""Extracts the site indices, site species, and neighbor distances for each pair
and stores it in a dictionary.
:param cell_structure: A pymatgen ``Structure`` object.
:param all_neighbors: A list of lists containing the neighbors for each site in
the structure.
:return: A dictionary of site indices, site species, and neighbor distances for
each pair.
"""
neighbor_distances: NeighborDistances = {
"i": [],
"j": [],
"subspecies_i": [],
"subspecies_j": [],
"distance_ij": [],
}
for site_i_index, site_i_neighbors in enumerate(all_neighbors):
append_site_i_neighbor_distance_data(
site_i_index=site_i_index,
site_i_neighbors=site_i_neighbors,
cell_structure=cell_structure,
neighbor_distances=neighbor_distances,
)
return neighbor_distances
def append_site_i_neighbor_distance_data(
site_i_index: int,
site_i_neighbors: SiteNeighbors,
cell_structure: Structure,
neighbor_distances: NeighborDistances,
) -> None:
"""Helper function to append indices, species, and distances in the
``neighbor_distances`` dictionary.
:param site_i_index: Site index of first site in neighbor pair.
:param site_i_neighbors: A list of site i's neighbors.
:param cell_structure: The pymatgen ``Structure`` object that defines the crystal
structure.
:param neighbor_distances: A dictionary of site indices, site species, and neighbor
distances for each pair.
"""
for site_j in site_i_neighbors:
subspecies_pair: List[str] = [
cell_structure[site_i_index].properties["subspecie"],
cell_structure[site_j[2]].properties["subspecie"],
]
        index_pair: List[int] = [site_i_index, site_j[2]]
neighbor_distances["i"].append(index_pair[0])
neighbor_distances["j"].append(index_pair[1])
neighbor_distances["subspecies_i"].append(subspecies_pair[0])
neighbor_distances["subspecies_j"].append(subspecies_pair[1])
neighbor_distances["distance_ij"].append(site_j[1])
def add_subspecie_labels_if_missing(cell_structure: Structure) -> Structure:
"""Makes a copy of ``cell_structure`` and then checks if ``cell_structure`` has
the subspecie site property. If it does, then return the copy as-is, otherwise
label each site of the copy using the site's atomic specie name and then return
it.
:param cell_structure: A pymatgen ``Structure`` object.
:return: An exact copy of the input ``cell_structure`` object with subspecie
labels added, if missing.
"""
cell_structure = cell_structure.copy()
if "subspecie" not in cell_structure.site_properties:
label_subspecies(cell_structure=cell_structure, site_indices=[])
return cell_structure
``` |
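A small usage sketch for `count_neighbors()`; the body-centered cubic Fe cell and the 5 Å cutoff are example inputs, not values from this repository, and the imports follow the older top-level pymatgen API that the module itself uses.
```python
from pymatgen import Lattice, Structure
from neighbormodels.neighbors import count_neighbors

# Conventional bcc Fe cell with two equivalent sites (example structure).
structure = Structure(
    lattice=Lattice.cubic(2.87),
    species=["Fe", "Fe"],
    coords=[[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]],
)

neighbor_data = count_neighbors(cell_structure=structure, r=5.0)
print(neighbor_data.neighbor_count.head())   # per-pair counts by distance bin
print(neighbor_data.sublattice_pairs)        # ranked unique sublattice pairs
```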
{
"source": "jkglasbrenner/dft-bare-susceptibility",
"score": 3
} |
#### File: dft-bare-susceptibility/dft_bare_susceptibility/dx_file_reader.py
```python
from pathlib import Path
import csv
import re
import gzip
import numpy as np
import sys
# Functions
def find_regexp(file_path, regexp):
with gzip.open(str(file_path), 'rt') as in_file:
tmp_match = regexp.findall(in_file.read())
return tmp_match
def extract_from_dx_file(file_path):
# Convert filepath string to Path object
file_path = Path(file_path)
# Extract from .dx file
nx, ny, nz = find_qmesh_pts(file_path)
q_vectors = find_qmesh_vectors(file_path)
origin = find_origin(file_path)
shape = find_shape(file_path)
data_string = find_data(file_path)
# Create dx data array
data_list = create_dx_data_list(data_string, shape)
# Convert dx format to generic numpy array format
data_array = build_data_array(nx, ny, nz, shape, data_list)
# Output
return q_vectors, origin, data_array
def find_qmesh_pts(file_path):
# Create regexp
    regexp = re.compile(r'(?:object\s+1.*?)(\d+)(?:\s+)'
                        r'(\d+)(?:\s+)(\d+)')
# Open and run regexp
tmp_match = find_regexp(file_path, regexp)
# Extract kpts
nx, ny, nz = tmp_match[0]
# Output
return int(nx), int(ny), int(nz)
def find_qmesh_vectors(file_path):
# Create regexp
re_num = r'([+-]?\d+\.?\d+(?:[eE][-+]?\d+)?)'
re_exp = (r'(?:delta\s+)' + re_num + r'(?:\s+)'
+ re_num + r'(?:\s+)' + re_num)
regexp = re.compile(re_exp)
# Open and run regexp
tmp_match = find_regexp(file_path, regexp)
# Extract kmesh
qx = tuple(float(item) for item in tmp_match[0])
qy = tuple(float(item) for item in tmp_match[1])
qz = tuple(float(item) for item in tmp_match[2])
# Convert to numpy array
q_vectors = np.array([qx, qy, qz])
# Output
return q_vectors
def find_origin(file_path):
# Create regexp
re_num = r'([+-]?\d+\.?\d+(?:[eE][-+]?\d+)?)'
re_exp = (r'(?:origin\s+)' + re_num + r'(?:\s+)'
+ re_num + r'(?:\s+)' + re_num)
regexp = re.compile(re_exp)
# Open and run regexp
tmp_match = find_regexp(file_path, regexp)
# Extract origin
origin = tuple(float(item) for item in tmp_match[0])
# Output
return origin
def find_shape(file_path):
# Create regexp
re_exp = r'(?:object\s+3.*?shape\s+)(\d+)'
regexp = re.compile(re_exp)
# Open and run regexp
tmp_match = find_regexp(file_path, regexp)
# Extract kmesh
shape = tmp_match[0]
# Output
return int(shape)
def find_data(file_path):
# Create regexp
    regexp = re.compile(r'(?:data follows.*?\n)'
                        r'((?:.*?\n)+?)(?:\s*object)')
# Open and run regexp
tmp_match = find_regexp(file_path, regexp)
# Output
return tmp_match[0]
def create_dx_data_list(string, shape):
# Create regexp
re_exp = r'([+-]?\d+\.?\d+(?:[eE][-+]?\d+)?)'
regexp = re.compile(re_exp)
# Run regexp
tmp_match = regexp.findall(string)
# Create lists
my_list = []
tmp_list = []
i = 1
for item in tmp_match:
if i <= shape:
tmp_list.append(float(item))
i = i + 1
if i > shape:
my_list.append(tmp_list)
tmp_list = []
i = 1
# Convert to numpy array
my_list = np.array(my_list)
# Output
return my_list
def build_data_array(nx, ny, nz, num_items, in_array):
# Allocate numpy array
tmp_array = np.zeros([num_items, nx, ny, nz])
    # Initialize the slice indices used to walk through in_array
slice_index = int(0)
end_index = slice_index + nz
# Assign to array
for i in range(nx):
for j in range(ny):
for n in range(num_items):
tmp_array[n, i, j, :] = in_array[slice_index:end_index, n]
slice_index = slice_index + nz
end_index = slice_index + nz
# Output array
return tmp_array
def write_file(data_array, qx, qy, qz, origin, outfilepath, data_format="csv"):
# Grab data_array dimensions
array_dimensions = data_array.shape
if len(array_dimensions) == 3:
nx, ny, nz = array_dimensions
shape = None
elif len(array_dimensions) == 4:
shape, nx, ny, nz = array_dimensions
else:
sys.exit("Unexpected data structure, exiting...")
# Open file
if data_format == "dx":
with open(str(outfilepath), 'wt') as out_file:
write_header(nx, ny, nz, qx, qy, qz, origin, shape, out_file)
write_data_columns(data_array, shape, nx, ny, nz, out_file)
write_footer(out_file)
elif data_format == "csv":
with open(str(outfilepath), 'w', newline="") as out_file:
write_csv_format(data_array, shape, nx, ny, nz, out_file)
else:
print("{0} is not a valid data format, exiting...".format(data_format))
def write_csv_format(data_array, shape, nx, ny, nz, outfile):
if shape is not None:
fieldnames = ["band_index", "kx", "ky", "kz", "energy"]
writer = csv.DictWriter(outfile, fieldnames=fieldnames,
quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
for band_index in range(shape):
for kx in range(nx):
for ky in range(ny):
for kz in range(nz):
data_row = {
"band_index": band_index,
"kx": kx,
"ky": ky,
"kz": kz,
"energy": data_array[band_index, kx, ky, kz]
}
writer.writerow(data_row)
else:
fieldnames = ["kx", "ky", "kz", "chi_real", "chi_imag"]
writer = csv.DictWriter(outfile, fieldnames=fieldnames,
quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
for kx in range(nx):
for ky in range(ny):
for kz in range(nz):
data_row = {
"kx": kx,
"ky": ky,
"kz": kz,
"chi_real": np.real(data_array[kx, ky, kz]),
"chi_imag": np.imag(data_array[kx, ky, kz])
}
writer.writerow(data_row)
def write_data_columns(data_array, shape, nx, ny, nz, outfile):
# Counters
xx = 0
yy = 0
zz = 0
# Loop for printing
for i in range(nx):
for j in range(ny):
zz = zz_loop(data_array, shape, nz, xx, yy, zz, outfile)
yy = iterate_counter(yy, ny)
xx = iterate_counter(xx, nx)
def zz_loop(data_array, shape, nz, xx, yy, zz, outfile):
# Set up output format
string = ''
if shape is not None:
for n in range(shape):
string = string + '{{i{0}:12.6f}}'.format(n)
else:
for n in range(2):
string = string + '{{i{0}:12.6f}}'.format(n)
# Printing loop
for k in range(nz):
line_dict = dict()
if shape is not None:
for n in range(shape):
line_dict["i" + str(n)] = data_array[n, xx, yy, zz]
else:
line_dict["i0"] = np.real(data_array[xx, yy, zz])
line_dict["i1"] = np.imag(data_array[xx, yy, zz])
tmp_line = string.format(**line_dict)
print(tmp_line, file=outfile)
zz = iterate_counter(zz, nz)
# Return counter
return zz
def write_header(nx, ny, nz, qx, qy, qz, origin, shape, outfile):
if shape is None:
shape = 2
nitems = int(nx * ny * nz)
header = (' object 1 class gridpositions counts {0:>11} {1:>11} {2:>11}\n'
'origin {3:14.8f} {4:14.8f} {5:14.8f}\n'
'delta {6:11.8f} {7:11.8f} {8:11.8f}\n'
'delta {9:11.8f} {10:11.8f} {11:11.8f}\n'
'delta {12:11.8f} {13:11.8f} {14:11.8f}\n'
' object 2 class gridconnections counts {15:>11} '
'{16:>11} {17:>11}\n'
' object 3 class array type float rank 1 shape {18:>11} items'
' {19:>11}\n'
' data follows')
output_str = header.format(nx, ny, nz,
origin[0], origin[1], origin[2],
qx[0], qx[1], qx[2],
qy[0], qy[1], qy[2],
qz[0], qz[1], qz[2],
nx, ny, nz,
shape, nitems)
# Write header
print(output_str, file=outfile)
def write_footer(outfile):
footer = (' object "regular positions regular connections" class '
'field\n'
' component "positions" value 1\n'
' component "connections" value 2\n'
' component "data" value 3\n'
' end')
# Write footer
print(footer, file=outfile)
def test_counter(count, qmax):
if count >= qmax:
count = int(0)
return count
else:
return count
def iterate_counter(count, qmax):
count += int(1)
return test_counter(count, qmax)
def expand_data_range(data_array):
# Calculate expanded parameters
in_nx, in_ny, in_nz = data_array.shape
nx = in_nx + 1
ny = in_ny + 1
nz = in_nz + 1
# Allocate array for expanded data
    new_data = np.zeros([nx, ny, nz], dtype=complex)
# Fill existing array
new_data[:-1, :-1, :-1] = data_array
# Fill in end of Brillouin zone
end_bz_fill(new_data, nx, ny, nz)
# Return results
return new_data
def end_bz_fill(new_data, nx, ny, nz):
for xx in range(nx):
for yy in range(ny):
for zz in range(nz):
i = xx
j = yy
k = zz
if (xx == (nx - 1)) or (yy == (ny - 1)) or (zz == (nz - 1)):
if xx == (nx - 1):
i = 0
if yy == (ny - 1):
j = 0
if zz == (nz - 1):
k = 0
new_data[xx, yy, zz] = new_data[i, j, k]
``` |
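A sketch of the intended round trip through the helpers above; the file names are placeholders, and treating the two per-point components as the real and imaginary parts of the susceptibility mirrors what the CSV writer expects.
```python
from dft_bare_susceptibility import dx_file_reader as dx

# Read a gzip-compressed OpenDX file (placeholder name).
q_vectors, origin, data = dx.extract_from_dx_file("chi0.dx.gz")

# data has shape (shape, nx, ny, nz); recombine a two-component field into a
# complex array and re-export it on the same mesh as CSV.
chi = data[0] + 1j * data[1]
dx.write_file(chi, q_vectors[0], q_vectors[1], q_vectors[2], origin,
              "chi0.csv", data_format="csv")
```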
{
"source": "jkglasbrenner/sshcustodian",
"score": 2
} |
#### File: sshcustodian/sshcustodian/sshcustodian.py
```python
from __future__ import (unicode_literals, division, absolute_import,
print_function)
from six.moves import filterfalse
"""
This module creates a subclass of the main Custodian class in the Custodian
project (github.com/materialsproject/custodian), which is a wrapper that
manages jobs running on computing clusters. The Custodian module is part of The
Materials Project (materialsproject.org/).
This subclass adds the functionality to copy the temporary directory created
via monty to the scratch partitions on slave compute nodes, provided that the
cluster's filesystem is configured in this way. The implementation invokes a
subprocess to utilize the ssh executable installed on the cluster, so it is not
particularly elegant or platform independent, nor is this solution likely to be
general to all clusters. This is why this modification has not been submitted
as a pull request to the main Custodian project.
"""
# Import modules
import logging
import subprocess
import sys
import datetime
import time
import os
import re
from itertools import islice, groupby
from socket import gethostname
from monty.tempfile import ScratchDir
from monty.shutil import gzip_dir
from monty.json import MontyEncoder
from monty.serialization import dumpfn
from custodian.custodian import Custodian
from custodian.custodian import CustodianError
# Module-level logger
logger = logging.getLogger(__name__)
class SSHCustodian(Custodian):
"""
The SSHCustodian class modifies the Custodian class from the custodian
module to be able to handle clusters that have separate scratch partitions
for each node. When scratch_dir_node_only is enabled, the temp_dir that
monty creates will be copied to all other compute nodes used in the
calculation and subsequently removed when the job is finished.
"""
__doc__ += Custodian.__doc__
def __init__(self, handlers, jobs, validators=None, max_errors=1,
polling_time_step=10, monitor_freq=30,
skip_over_errors=False, scratch_dir=None,
gzipped_output=False, checkpoint=False,
scratch_dir_node_only=False, pbs_nodefile=None):
""" scratch_dir_node_only (bool): If set to True, custodian will grab
the list of nodes in the file path provided to pbs_nodefile and
            copy the temp_dir to the scratch_dir on each node over
ssh. This is necessary on cluster setups where each node has
its own independent scratch partition.
pbs_nodefile (str): The filepath to the list of nodes to be used in
a calculation. If this path does not point to a valid file,
then scratch_dir_node_only will be automatically set to False.
"""
super(SSHCustodian, self).__init__(handlers, jobs, validators,
max_errors, polling_time_step,
monitor_freq, skip_over_errors,
scratch_dir, gzipped_output,
checkpoint)
self.hostname = gethostname()
if pbs_nodefile is None:
self.scratch_dir_node_only = False
self.slave_compute_node_list = None
elif os.path.exists(pbs_nodefile):
self.scratch_dir_node_only = scratch_dir_node_only
self.pbs_nodefile = pbs_nodefile
self.slave_compute_node_list = (
self._process_pbs_nodefile(self.pbs_nodefile, self.hostname))
else:
self.scratch_dir_node_only = False
self.pbs_nodefile = None
self.slave_compute_node_list = None
@staticmethod
def _process_pbs_nodefile(pbs_nodefile, hostname):
with open(pbs_nodefile) as in_file:
nodelist = in_file.read().splitlines()
slave_compute_node_list = [
node for node, _ in groupby(filterfalse(lambda x: x == hostname,
nodelist))
]
return slave_compute_node_list
def _copy_to_slave_node_dirs(self, temp_dir_path):
"""
Copy temporary scratch directory from master node to other nodes.
Args:
temp_dir_path (str): The path to the temporary scratch directory.
It is assumed here that the root path of the scratch directory
is the same on all nodes.
"""
process_list = []
for node in self.slave_compute_node_list:
command = ['rsync', '-azhq', temp_dir_path,
'{0}:{1}'.format(node,
os.path.abspath(self.scratch_dir))]
p = subprocess.Popen(command, shell=False)
process_list.append(p)
# Wait for syncing to finish before moving on
for process in process_list:
process.wait()
def _update_slave_node_vasp_input_files(self, temp_dir_path):
"""
Update VASP input files in the scratch partition on the slave compute
nodes.
Args:
temp_dir_path (str): The path to the temporary scratch directory.
It is assumed here that the root path of the scratch directory
is the same on all nodes.
"""
VASP_INPUT_FILES = [x for x in ["{0}/CHGCAR".format(temp_dir_path),
"{0}/WAVECAR".format(temp_dir_path),
"{0}/INCAR".format(temp_dir_path),
"{0}/POSCAR".format(temp_dir_path),
"{0}/POTCAR".format(temp_dir_path),
"{0}/KPOINTS".format(temp_dir_path)] if
os.path.exists(x)]
process_list = []
for node in self.slave_compute_node_list:
for filepath in VASP_INPUT_FILES:
command = 'scp {0} {1}:{2}/'.format(filepath, node,
temp_dir_path)
p = subprocess.Popen(command, shell=True)
process_list.append(p)
# Wait for syncing to finish before moving on
for process in process_list:
process.wait()
def _delete_slave_node_dirs(self, temp_dir_path):
"""
Delete the temporary scratch directory on the slave nodes.
Args:
temp_dir_path (str): The path to the temporary scratch directory.
It is assumed here that the root path of the scratch directory
is the same on all nodes.
"""
process_list = []
for node in self.slave_compute_node_list:
command = 'ssh {0} "rm -rf {1}"'.format(node, temp_dir_path)
p = subprocess.Popen(command, shell=True)
process_list.append(p)
# Wait for deletion to finish before moving on
for process in process_list:
process.wait()
def _manage_node_scratch(self, temp_dir_path, job_start):
"""
Checks whether the user wants to make use of scratch partitions on each
compute node, and if True, either copies the temporary directory to or
deletes the temporary directory from each slave compute node. If the
user does not specify to use node-specific scratch partitions, then the
function does nothing.
Args:
temp_dir_path (str): The path to the temporary scratch directory.
job_start (bool): If True, then the job has started and the
temporary directory will be copied to the slave compute
nodes. If False, then the temporary directories will be deleted
from the slave compute nodes.
"""
if self.scratch_dir_node_only:
if job_start:
self._copy_to_slave_node_dirs(temp_dir_path)
else:
self._delete_slave_node_dirs(temp_dir_path)
else:
pass
def _update_node_scratch(self, temp_dir_path, job):
"""
Method to update the scratch partitions on the slave compute nodes
if they exist and are running a VASP job.
Args:
temp_dir_path (str): The path to the temporary scratch directory.
job (object): The job object you intend to run. Currently supports
VASP jobs.
"""
vasp_re = re.compile(r'vasp')
if self.scratch_dir is not None:
try:
jobtype = job.get_jobtype()
if self.scratch_dir_node_only:
if vasp_re.match(jobtype):
self._update_slave_node_vasp_input_files(temp_dir_path)
else:
pass
else:
pass
            except Exception:
pass
else:
pass
def run(self):
"""
Override of Custodian.run() to include instructions to copy the
temp_dir to the scratch partition on slave compute nodes if requested.
"""
cwd = os.getcwd()
with ScratchDir(self.scratch_dir, create_symbolic_link=True,
copy_to_current_on_exit=True,
copy_from_current_on_enter=True) as temp_dir:
self._manage_node_scratch(temp_dir_path=temp_dir,
job_start=True)
self.total_errors = 0
start = datetime.datetime.now()
logger.info("Run started at {} in {}.".format(
start, temp_dir))
v = sys.version.replace("\n", " ")
logger.info("Custodian running on Python version {}".format(v))
try:
# skip jobs until the restart
for job_n, job in islice(enumerate(self.jobs, 1),
self.restart, None):
self._run_job(job_n, job, temp_dir)
# Checkpoint after each job so that we can recover from
# last point and remove old checkpoints
if self.checkpoint:
super(SSHCustodian, self)._save_checkpoint(cwd, job_n)
except CustodianError as ex:
logger.error(ex.message)
if ex.raises:
raise RuntimeError("{} errors reached: {}. Exited..."
.format(self.total_errors, ex))
finally:
# Log the corrections to a json file.
logger.info("Logging to {}...".format(super(SSHCustodian,
self).LOG_FILE))
dumpfn(self.run_log, super(SSHCustodian, self).LOG_FILE,
cls=MontyEncoder, indent=4)
end = datetime.datetime.now()
logger.info("Run ended at {}.".format(end))
run_time = end - start
logger.info("Run completed. Total time taken = {}."
.format(run_time))
# Remove duplicate copy of log file, provided it ends with
# ".log"
for x in ([x for x in os.listdir(temp_dir)
if re.match(r'\w*\.log', x)]):
os.remove(os.path.join(temp_dir, x))
self._manage_node_scratch(temp_dir_path=temp_dir,
job_start=False)
if self.gzipped_output:
gzip_dir(".")
# Cleanup checkpoint files (if any) if run is successful.
super(SSHCustodian, self)._delete_checkpoints(cwd)
return self.run_log
def _run_job(self, job_n, job, temp_dir):
"""
Overrides custodian.custodian._run_job() to propagate changes to input
files on different scratch partitions on compute nodes, if needed.
"""
self.run_log.append({"job": job.as_dict(), "corrections": []})
job.setup()
for attempt in range(1, self.max_errors - self.total_errors + 1):
# Propagate updated input files, if needed
self._update_node_scratch(temp_dir, job)
logger.info(
"Starting job no. {} ({}) attempt no. {}. Errors "
"thus far = {}.".format(
job_n, job.name, attempt, self.total_errors))
p = job.run()
# Check for errors using the error handlers and perform
# corrections.
has_error = False
# While the job is running, we use the handlers that are
# monitors to monitor the job.
if isinstance(p, subprocess.Popen):
if self.monitors:
n = 0
while True:
n += 1
time.sleep(self.polling_time_step)
if p.poll() is not None:
break
if n % self.monitor_freq == 0:
has_error = self._do_check(self.monitors,
p.terminate)
else:
p.wait()
logger.info("{}.run has completed. "
"Checking remaining handlers".format(job.name))
# Check for errors again, since in some cases non-monitor
# handlers fix the problems detected by monitors
# if an error has been found, not all handlers need to run
if has_error:
self._do_check([h for h in self.handlers
if not h.is_monitor])
else:
has_error = self._do_check(self.handlers)
# If there are no errors detected, perform
# postprocessing and exit.
if not has_error:
for v in self.validators:
if v.check():
s = "Validation failed: {}".format(v)
raise CustodianError(s, True, v)
job.postprocess()
return
# check that all errors could be handled
for x in self.run_log[-1]["corrections"]:
if not x["actions"] and x["handler"].raises_runtime_error:
s = "Unrecoverable error for handler: {}. " \
"Raising RuntimeError".format(x["handler"])
raise CustodianError(s, True, x["handler"])
for x in self.run_log[-1]["corrections"]:
if not x["actions"]:
s = "Unrecoverable error for handler: %s" % x["handler"]
raise CustodianError(s, False, x["handler"])
logger.info("Max errors reached.")
raise CustodianError("MaxErrors", True)
# Inherit Custodian docstrings
__init__.__doc__ = Custodian.__init__.__doc__ + __init__.__doc__
run.__doc__ = Custodian.run.__doc__
_run_job.__doc__ = Custodian._run_job.__doc__
``` |
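A minimal sketch of how this class is meant to be driven, assuming a VASP workflow; `VaspJob` and `VaspErrorHandler` come from the upstream custodian package, and the command, scratch path and error budget are placeholders.
```python
import os
from custodian.vasp.jobs import VaspJob
from custodian.vasp.handlers import VaspErrorHandler
from sshcustodian.sshcustodian import SSHCustodian

jobs = [VaspJob(vasp_cmd=["mpirun", "vasp_std"])]
handlers = [VaspErrorHandler()]

c = SSHCustodian(
    handlers,
    jobs,
    max_errors=5,
    scratch_dir="/scratch",
    scratch_dir_node_only=True,                   # sync temp_dir to slave nodes
    pbs_nodefile=os.environ.get("PBS_NODEFILE"),  # provided by the PBS scheduler
)
c.run()
```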
{
"source": "jkgoodrich/hail",
"score": 3
} |
#### File: auth/auth/exceptions.py
```python
from aiohttp import web
class AuthUserError(Exception):
def __init__(self, message, severity):
super().__init__(message)
self.message = message
self.ui_error_type = severity
def http_response(self):
return web.HTTPBadRequest(reason=self.message)
class EmptyLoginID(AuthUserError):
def __init__(self, username):
super().__init__(f"Login id for user '{username}' must be a non-empty string.", 'error')
class DuplicateLoginID(AuthUserError):
def __init__(self, username, login_id):
super().__init__(f"Login id '{login_id}' already exists for user '{username}'.", 'error')
class DuplicateUsername(AuthUserError):
def __init__(self, username, login_id):
super().__init__(f"Username '{username}' already exists with login id '{login_id}'.", 'error')
class InvalidUsername(AuthUserError):
def __init__(self, username):
super().__init__(f"Invalid username '{username}'. Must be a non-empty alphanumeric string.", 'error')
class InvalidType(AuthUserError):
def __init__(self, field_name, input, expected_type):
        super().__init__(f"Expected '{field_name}' to be of type {expected_type}. Found type {type(input)}.", 'error')
class MultipleUserTypes(AuthUserError):
def __init__(self, username):
super().__init__(f"User '{username}' cannot be both a developer and a service account.", 'error')
class MultipleExistingUsers(AuthUserError):
def __init__(self, username, login_id):
super().__init__(
f"Multiple users with user name '{username}' and login id '{login_id}' appear in the database.",
'error',
)
def http_response(self):
return web.HTTPInternalServerError(reason=self.message)
class UnknownUser(AuthUserError):
def __init__(self, username):
super().__init__(f"Unknown user '{username}'.", 'error')
def http_response(self):
return web.HTTPNotFound(reason=self.message)
class PreviouslyDeletedUser(AuthUserError):
def __init__(self, username):
super().__init__(f"User '{username}' has already been deleted.", 'error')
```
#### File: auth/auth/flow.py
```python
import abc
import json
import urllib.parse
import aiohttp.web
import google.auth.transport.requests
import google.oauth2.id_token
import google_auth_oauthlib.flow
import msal
from gear.cloud_config import get_global_config
class FlowResult:
def __init__(self, login_id: str, email: str, token: dict):
self.login_id = login_id
self.email = email
self.token = token
class Flow(abc.ABC):
@abc.abstractmethod
def initiate_flow(self, redirect_uri: str) -> dict:
"""
Initiates the OAuth2 flow. Usually run in response to a user clicking a login button.
The returned dict should be stored in a secure session so that the server can
identify to which OAuth2 flow a client is responding. In particular, the server must
pass this dict to :meth:`.receive_callback` in the OAuth2 callback.
"""
raise NotImplementedError
@abc.abstractmethod
def receive_callback(self, request: aiohttp.web.Request, flow_dict: dict) -> FlowResult:
"""Concludes the OAuth2 flow by returning the user's identity and credentials."""
raise NotImplementedError
class GoogleFlow(Flow):
scopes = [
'https://www.googleapis.com/auth/userinfo.profile',
'https://www.googleapis.com/auth/userinfo.email',
'openid',
]
def __init__(self, credentials_file: str):
self._credentials_file = credentials_file
def initiate_flow(self, redirect_uri: str) -> dict:
flow = google_auth_oauthlib.flow.Flow.from_client_secrets_file(
self._credentials_file, scopes=self.scopes, state=None
)
flow.redirect_uri = redirect_uri
authorization_url, state = flow.authorization_url(access_type='offline', include_granted_scopes='true')
return {
'authorization_url': authorization_url,
'redirect_uri': redirect_uri,
'state': state,
}
def receive_callback(self, request: aiohttp.web.Request, flow_dict: dict) -> FlowResult:
flow = google_auth_oauthlib.flow.Flow.from_client_secrets_file(
self._credentials_file, scopes=self.scopes, state=flow_dict['state']
)
flow.redirect_uri = flow_dict['callback_uri']
flow.fetch_token(code=request.query['code'])
token = google.oauth2.id_token.verify_oauth2_token(
flow.credentials.id_token, google.auth.transport.requests.Request()
)
email = token['email']
return FlowResult(email, email, token)
class AzureFlow(Flow):
def __init__(self, credentials_file: str):
with open(credentials_file, encoding='utf-8') as f:
data = json.loads(f.read())
tenant_id = data['tenant']
authority = f'https://login.microsoftonline.com/{tenant_id}'
client = msal.ConfidentialClientApplication(data['appId'], data['password'], authority)
self._client = client
self._tenant_id = tenant_id
def initiate_flow(self, redirect_uri: str) -> dict:
flow = self._client.initiate_auth_code_flow(scopes=[], redirect_uri=redirect_uri)
return {
'flow': flow,
'authorization_url': flow['auth_uri'],
'state': flow['state'],
}
def receive_callback(self, request: aiohttp.web.Request, flow_dict: dict) -> FlowResult:
query_dict = urllib.parse.parse_qs(request.query_string)
query_dict = {k: v[0] for k, v in query_dict.items()}
token = self._client.acquire_token_by_auth_code_flow(flow_dict['flow'], query_dict)
if 'error' in token:
raise Exception(f'{token}')
tid = token['id_token_claims']['tid']
if tid != self._tenant_id:
raise Exception('invalid tenant id')
return FlowResult(token['id_token_claims']['oid'], token['id_token_claims']['preferred_username'], token)
def get_flow_client(credentials_file: str) -> Flow:
cloud = get_global_config()['cloud']
if cloud == 'azure':
return AzureFlow(credentials_file)
assert cloud == 'gcp'
return GoogleFlow(credentials_file)
```
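To make the contract in the docstrings concrete, here is a rough sketch of login and callback handlers in an aiohttp app; the session handling, route names and credentials path are illustrative, not Hail's actual handlers. Note that `GoogleFlow.receive_callback` reads the `callback_uri` key, so the caller adds it to the stored flow dict.
```python
import aiohttp.web
import aiohttp_session
from auth.flow import get_flow_client

flow_client = get_flow_client('/secrets/oauth2-client-secret.json')  # placeholder path

async def login(request: aiohttp.web.Request) -> aiohttp.web.Response:
    redirect_uri = str(request.url.with_path('/oauth2callback'))
    flow_data = flow_client.initiate_flow(redirect_uri)
    flow_data['callback_uri'] = redirect_uri  # consumed by GoogleFlow.receive_callback
    session = await aiohttp_session.new_session(request)
    session['flow'] = flow_data               # keep the flow state server-side
    raise aiohttp.web.HTTPFound(flow_data['authorization_url'])

async def oauth2callback(request: aiohttp.web.Request) -> aiohttp.web.Response:
    session = await aiohttp_session.get_session(request)
    result = flow_client.receive_callback(request, session['flow'])
    return aiohttp.web.json_response({'login_id': result.login_id, 'email': result.email})
```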
#### File: batch/batch/inst_coll_config.py
```python
import asyncio
import logging
from typing import Dict, Optional, Tuple
from gear import Database
from .cloud.azure.instance_config import AzureSlimInstanceConfig
from .cloud.azure.resource_utils import azure_worker_properties_to_machine_type
from .cloud.gcp.instance_config import GCPSlimInstanceConfig
from .cloud.gcp.resource_utils import GCP_MACHINE_FAMILY, family_worker_type_cores_to_gcp_machine_type
from .cloud.resource_utils import (
adjust_cores_for_memory_request,
adjust_cores_for_packability,
cores_mcpu_to_memory_bytes,
local_ssd_size,
machine_type_to_worker_type_cores,
requested_storage_bytes_to_actual_storage_gib,
valid_machine_types,
)
from .cloud.utils import possible_cloud_locations
from .driver.billing_manager import ProductVersions
from .instance_config import InstanceConfig
log = logging.getLogger('inst_coll_config')
def instance_config_from_pool_config(
pool_config: 'PoolConfig', product_versions: ProductVersions, location: str
) -> InstanceConfig:
cloud = pool_config.cloud
if cloud == 'gcp':
machine_type = family_worker_type_cores_to_gcp_machine_type(
GCP_MACHINE_FAMILY, pool_config.worker_type, pool_config.worker_cores
)
return GCPSlimInstanceConfig.create(
product_versions=product_versions,
machine_type=machine_type,
preemptible=pool_config.preemptible,
local_ssd_data_disk=pool_config.worker_local_ssd_data_disk,
data_disk_size_gb=pool_config.data_disk_size_gb,
boot_disk_size_gb=pool_config.boot_disk_size_gb,
job_private=False,
location=location,
)
assert cloud == 'azure'
machine_type = azure_worker_properties_to_machine_type(
pool_config.worker_type, pool_config.worker_cores, pool_config.worker_local_ssd_data_disk
)
return AzureSlimInstanceConfig.create(
product_versions=product_versions,
machine_type=machine_type,
preemptible=pool_config.preemptible,
local_ssd_data_disk=pool_config.worker_local_ssd_data_disk,
data_disk_size_gb=pool_config.data_disk_size_gb,
boot_disk_size_gb=pool_config.boot_disk_size_gb,
job_private=False,
location=location,
)
class InstanceCollectionConfig:
pass
class PoolConfig(InstanceCollectionConfig):
@staticmethod
def from_record(record):
return PoolConfig(
name=record['name'],
cloud=record['cloud'],
worker_type=record['worker_type'],
worker_cores=record['worker_cores'],
worker_local_ssd_data_disk=record['worker_local_ssd_data_disk'],
worker_external_ssd_data_disk_size_gb=record['worker_external_ssd_data_disk_size_gb'],
enable_standing_worker=record['enable_standing_worker'],
standing_worker_cores=record['standing_worker_cores'],
boot_disk_size_gb=record['boot_disk_size_gb'],
max_instances=record['max_instances'],
max_live_instances=record['max_live_instances'],
preemptible=bool(record['preemptible']),
)
async def update_database(self, db: Database):
await db.just_execute(
'''
UPDATE pools
INNER JOIN inst_colls ON pools.name = inst_colls.name
SET worker_cores = %s,
worker_local_ssd_data_disk = %s,
worker_external_ssd_data_disk_size_gb = %s,
enable_standing_worker = %s,
standing_worker_cores = %s,
boot_disk_size_gb = %s,
max_instances = %s,
max_live_instances = %s,
preemptible = %s
WHERE pools.name = %s;
''',
(
self.worker_cores,
self.worker_local_ssd_data_disk,
self.worker_external_ssd_data_disk_size_gb,
self.enable_standing_worker,
self.standing_worker_cores,
self.boot_disk_size_gb,
self.max_instances,
self.max_live_instances,
self.preemptible,
self.name,
),
)
def __init__(
self,
name: str,
cloud: str,
worker_type: str,
worker_cores: int,
worker_local_ssd_data_disk: bool,
worker_external_ssd_data_disk_size_gb: int,
enable_standing_worker: bool,
standing_worker_cores: int,
boot_disk_size_gb: int,
max_instances: int,
max_live_instances: int,
preemptible: bool,
):
self.name = name
self.cloud = cloud
self.worker_type = worker_type
self.worker_cores = worker_cores
self.worker_local_ssd_data_disk = worker_local_ssd_data_disk
self.worker_external_ssd_data_disk_size_gb = worker_external_ssd_data_disk_size_gb
self.enable_standing_worker = enable_standing_worker
self.standing_worker_cores = standing_worker_cores
self.boot_disk_size_gb = boot_disk_size_gb
self.max_instances = max_instances
self.max_live_instances = max_live_instances
self.preemptible = preemptible
def instance_config(self, product_versions: ProductVersions, location: str) -> InstanceConfig:
return instance_config_from_pool_config(self, product_versions, location)
@property
def data_disk_size_gb(self) -> int:
if self.worker_local_ssd_data_disk:
return local_ssd_size(self.cloud, self.worker_type, self.worker_cores)
return self.worker_external_ssd_data_disk_size_gb
@property
def data_disk_size_standing_gb(self) -> int:
if self.worker_local_ssd_data_disk:
return local_ssd_size(self.cloud, self.worker_type, self.standing_worker_cores)
return self.worker_external_ssd_data_disk_size_gb
def convert_requests_to_resources(self, cores_mcpu, memory_bytes, storage_bytes):
storage_gib = requested_storage_bytes_to_actual_storage_gib(self.cloud, storage_bytes, allow_zero_storage=True)
if storage_gib is None:
return None
cores_mcpu = adjust_cores_for_memory_request(self.cloud, cores_mcpu, memory_bytes, self.worker_type)
cores_mcpu = adjust_cores_for_packability(cores_mcpu)
memory_bytes = cores_mcpu_to_memory_bytes(self.cloud, cores_mcpu, self.worker_type)
if cores_mcpu <= self.worker_cores * 1000:
return (cores_mcpu, memory_bytes, storage_gib)
return None
def cost_per_hour(self, resource_rates, product_versions, location, cores_mcpu, memory_bytes, storage_gib):
instance_config = self.instance_config(product_versions, location)
cost_per_hour = instance_config.cost_per_hour(resource_rates, cores_mcpu, memory_bytes, storage_gib)
return cost_per_hour
class JobPrivateInstanceManagerConfig(InstanceCollectionConfig):
@staticmethod
def from_record(record):
return JobPrivateInstanceManagerConfig(
record['name'],
record['cloud'],
record['boot_disk_size_gb'],
record['max_instances'],
record['max_live_instances'],
)
def __init__(self, name, cloud, boot_disk_size_gb: int, max_instances, max_live_instances):
self.name = name
self.cloud = cloud
self.boot_disk_size_gb = boot_disk_size_gb
self.max_instances = max_instances
self.max_live_instances = max_live_instances
def convert_requests_to_resources(self, machine_type, storage_bytes):
storage_gib = requested_storage_bytes_to_actual_storage_gib(self.cloud, storage_bytes, allow_zero_storage=False)
if storage_gib is None:
return None
worker_type, cores = machine_type_to_worker_type_cores(self.cloud, machine_type)
cores_mcpu = cores * 1000
memory_bytes = cores_mcpu_to_memory_bytes(self.cloud, cores_mcpu, worker_type)
return (self.name, cores_mcpu, memory_bytes, storage_gib)
class InstanceCollectionConfigs:
@staticmethod
async def create(db: Database):
(name_pool_config, jpim_config), resource_rates, product_versions_data = await asyncio.gather(
InstanceCollectionConfigs.instance_collections_from_db(db),
InstanceCollectionConfigs.resource_rates_from_db(db),
InstanceCollectionConfigs.product_versions_from_db(db),
)
return InstanceCollectionConfigs(name_pool_config, jpim_config, resource_rates, product_versions_data)
@staticmethod
async def instance_collections_from_db(
db: Database,
) -> Tuple[Dict[str, PoolConfig], JobPrivateInstanceManagerConfig]:
records = db.execute_and_fetchall(
'''
SELECT inst_colls.*, pools.*
FROM inst_colls
LEFT JOIN pools ON inst_colls.name = pools.name;
'''
)
name_pool_config: Dict[str, PoolConfig] = {}
jpim_config: Optional[JobPrivateInstanceManagerConfig] = None
async for record in records:
if record['is_pool']:
config = PoolConfig.from_record(record)
name_pool_config[config.name] = config
else:
config = JobPrivateInstanceManagerConfig.from_record(record)
jpim_config = config
assert jpim_config is not None
return name_pool_config, jpim_config
@staticmethod
async def resource_rates_from_db(db: Database) -> Dict[str, float]:
return {
record['resource']: record['rate'] async for record in db.execute_and_fetchall('SELECT * FROM resources;')
}
@staticmethod
async def product_versions_from_db(db: Database) -> Dict[str, str]:
return {
record['product']: record['version']
async for record in db.execute_and_fetchall('SELECT * FROM latest_product_versions;')
}
def __init__(
self,
name_pool_config: Dict[str, PoolConfig],
jpim_config: JobPrivateInstanceManagerConfig,
resource_rates: Dict[str, float],
product_versions_data: Dict[str, str],
):
self.name_pool_config = name_pool_config
self.jpim_config = jpim_config
self.resource_rates = resource_rates
self.product_versions = ProductVersions(product_versions_data)
async def refresh(self, db: Database):
configs, resource_rates, product_versions_data = await asyncio.gather(
InstanceCollectionConfigs.instance_collections_from_db(db),
InstanceCollectionConfigs.resource_rates_from_db(db),
InstanceCollectionConfigs.product_versions_from_db(db),
)
self.name_pool_config, self.jpim_config = configs
self.resource_rates = resource_rates
self.product_versions.update(product_versions_data)
def select_pool_from_cost(self, cloud, cores_mcpu, memory_bytes, storage_bytes, preemptible):
assert self.resource_rates is not None
optimal_result = None
optimal_cost = None
for pool in self.name_pool_config.values():
if pool.cloud != cloud or pool.preemptible != preemptible:
continue
result = pool.convert_requests_to_resources(cores_mcpu, memory_bytes, storage_bytes)
if result:
maybe_cores_mcpu, maybe_memory_bytes, maybe_storage_gib = result
max_regional_maybe_cost = None
for location in possible_cloud_locations(pool.cloud):
maybe_cost = pool.cost_per_hour(
self.resource_rates,
self.product_versions,
location,
maybe_cores_mcpu,
maybe_memory_bytes,
maybe_storage_gib,
)
if max_regional_maybe_cost is None or maybe_cost > max_regional_maybe_cost:
max_regional_maybe_cost = maybe_cost
if optimal_cost is None or max_regional_maybe_cost < optimal_cost:
optimal_cost = max_regional_maybe_cost
optimal_result = (pool.name, maybe_cores_mcpu, maybe_memory_bytes, maybe_storage_gib)
return optimal_result
def select_pool_from_worker_type(self, cloud, worker_type, cores_mcpu, memory_bytes, storage_bytes, preemptible):
for pool in self.name_pool_config.values():
if pool.cloud == cloud and pool.worker_type == worker_type and pool.preemptible == preemptible:
result = pool.convert_requests_to_resources(cores_mcpu, memory_bytes, storage_bytes)
if result:
                    actual_cores_mcpu, actual_memory_bytes, actual_storage_gib = result
                    return (pool.name, actual_cores_mcpu, actual_memory_bytes, actual_storage_gib)
return None
def select_job_private(self, cloud, machine_type, storage_bytes):
if self.jpim_config.cloud != cloud:
return None
return self.jpim_config.convert_requests_to_resources(machine_type, storage_bytes)
def select_inst_coll(
self, cloud, machine_type, preemptible, worker_type, req_cores_mcpu, req_memory_bytes, req_storage_bytes
):
if worker_type is not None and machine_type is None:
result = self.select_pool_from_worker_type(
cloud=cloud,
worker_type=worker_type,
cores_mcpu=req_cores_mcpu,
memory_bytes=req_memory_bytes,
storage_bytes=req_storage_bytes,
preemptible=preemptible,
)
elif worker_type is None and machine_type is None:
result = self.select_pool_from_cost(
cloud=cloud,
cores_mcpu=req_cores_mcpu,
memory_bytes=req_memory_bytes,
storage_bytes=req_storage_bytes,
preemptible=preemptible,
)
else:
assert machine_type and machine_type in valid_machine_types(cloud)
assert worker_type is None
result = self.select_job_private(cloud=cloud, machine_type=machine_type, storage_bytes=req_storage_bytes)
return (result, None)
```
#### File: gear/gear/time_limited_max_size_cache.py
```python
import asyncio
import time
from typing import Awaitable, Callable, Dict, Generic, TypeVar
import prometheus_client as pc # type: ignore
import sortedcontainers
from prometheus_async.aio import time as prom_async_time # type: ignore
CACHE_HITS = pc.Counter('cache_hits_count', 'Number of Cache Hits', ['cache_name'])
CACHE_MISSES = pc.Counter('cache_misses_count', 'Number of Cache Misses', ['cache_name'])
CACHE_EVICTIONS = pc.Counter('cache_evictions_count', 'Number of Cache Evictions', ['cache_name'])
CACHE_LOAD_LATENCY = pc.Summary(
'cache_load_latency_seconds', 'Latency of loading cache values in seconds', ['cache_name']
)
T = TypeVar('T')
U = TypeVar('U')
class TimeLimitedMaxSizeCache(Generic[T, U]):
def __init__(self, load: Callable[[T], Awaitable[U]], lifetime_ns: int, num_slots: int, cache_name: str):
assert lifetime_ns > 0
assert num_slots > 0
self.load = load
self.lifetime_ns = lifetime_ns
self.num_slots = num_slots
self.cache_name = cache_name
self._futures: Dict[T, asyncio.Future] = {}
self._cache: Dict[T, U] = {}
self._expiry_time: Dict[T, int] = {}
self._keys_by_expiry = sortedcontainers.SortedSet(key=lambda k: self._expiry_time[k])
self._shutting_down = False
async def shutdown(self):
"""Wait for all outstanding futures to complete and prevent new lookups.
This class does not manage any resources itself and this function is *not required* to be
called.
"""
self._shutting_down = True
await asyncio.wait(self._futures.values())
assert len(self._futures) == 0
async def lookup(self, k: T) -> U:
if self._shutting_down:
raise ValueError('Cache is shutting down.')
if k in self._expiry_time:
assert k in self._cache
if self._expiry_time[k] <= time.monotonic_ns():
self._remove(k)
if k in self._cache:
CACHE_HITS.labels(cache_name=self.cache_name).inc()
return self._cache[k]
CACHE_MISSES.labels(cache_name=self.cache_name).inc()
if k in self._futures:
return await self._futures[k]
self._futures[k] = asyncio.create_task(self.load(k))
try:
v = await prom_async_time(CACHE_LOAD_LATENCY.labels(cache_name=self.cache_name), self._futures[k])
finally:
del self._futures[k]
self._put(k, v)
if self._over_capacity():
CACHE_EVICTIONS.labels(cache_name=self.cache_name).inc()
self._evict_oldest()
return v
def _put(self, k: T, v: U) -> None:
expiry_time = time.monotonic_ns() + self.lifetime_ns
self._cache[k] = v
self._expiry_time[k] = expiry_time
self._keys_by_expiry.add(k)
def _remove(self, k: T) -> None:
del self._cache[k]
self._keys_by_expiry.remove(k)
del self._expiry_time[k]
def _over_capacity(self) -> bool:
return len(self._keys_by_expiry) > self.num_slots
def _evict_oldest(self) -> None:
oldest_key = self._keys_by_expiry[0]
self._remove(oldest_key)
```
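A minimal usage sketch of `TimeLimitedMaxSizeCache`, assuming the module is importable under the path shown and using a hypothetical `fetch_user` loader in place of a real database or HTTP call:
```python
import asyncio

from gear.time_limited_max_size_cache import TimeLimitedMaxSizeCache  # assumed import path

async def fetch_user(user_id: str) -> dict:
    # Hypothetical loader standing in for a real lookup.
    await asyncio.sleep(0)
    return {'id': user_id}

async def main():
    cache = TimeLimitedMaxSizeCache(
        load=fetch_user,
        lifetime_ns=30 * 1_000_000_000,  # entries expire 30 seconds after being loaded
        num_slots=1000,                  # oldest entry is evicted once over capacity
        cache_name='users',              # label applied to the Prometheus metrics
    )
    first = await cache.lookup('alice')   # miss: awaits fetch_user and stores the result
    second = await cache.lookup('alice')  # hit: served from the cache within its lifetime
    assert first == second

asyncio.run(main())
```
Concurrent lookups of the same key share a single in-flight `load` call through the `_futures` map, so the loader runs at most once per key per expiry window.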
#### File: hail/fs/router_fs.py
```python
from typing import List, AsyncContextManager, BinaryIO, Optional
import asyncio
import io
from hailtop.aiotools.local_fs import LocalAsyncFSURL
import nest_asyncio
import os
import functools
import glob
import fnmatch
from hailtop.aiotools.fs import Copier, Transfer, FileListEntry, ReadableStream, WritableStream
from hailtop.aiotools.local_fs import LocalAsyncFS
from hailtop.aiotools.router_fs import RouterAsyncFS
from hailtop.utils import bounded_gather, async_to_blocking
from .fs import FS
from .stat_result import FileType, StatResult
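# The two adapter classes below wrap async streams in the blocking io.RawIOBase /
# BinaryIO interface by driving the event loop through async_to_blocking, which is
# what lets RouterFS.open() hand back an ordinary file-like object.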
class SyncReadableStream(io.RawIOBase, BinaryIO): # type: ignore # https://github.com/python/typeshed/blob/a40d79a4e63c4e750a8d3a8012305da942251eb4/stdlib/http/client.pyi#L81
def __init__(self, ars: ReadableStream, name: str):
super().__init__()
self.ars = ars
self._mode = 'rb'
self._name = name
@property
def mode(self):
return self._mode
@property
def name(self):
return self._name
def close(self):
self.ars.close()
async_to_blocking(self.ars.wait_closed())
@property
def closed(self) -> bool:
return self.ars.closed
def fileno(self) -> int:
raise OSError
def flush(self):
pass
def isatty(self):
return False
def readable(self):
return True
def seek(self, offset, whence=None):
raise OSError
def seekable(self):
return False
def tell(self):
raise io.UnsupportedOperation
def truncate(self):
raise io.UnsupportedOperation
def writable(self):
return False
def writelines(self, lines):
raise OSError
def read(self, size=-1) -> bytes:
return async_to_blocking(self.ars.read(size))
def readall(self) -> bytes:
return async_to_blocking(self.ars.read(-1))
def readinto(self, b):
b[:] = async_to_blocking(self.ars.readexactly(len(b)))
def write(self, b):
raise OSError
class SyncWritableStream(io.RawIOBase, BinaryIO): # type: ignore # https://github.com/python/typeshed/blob/a40d79a4e63c4e750a8d3a8012305da942251eb4/stdlib/http/client.pyi#L81
def __init__(self, cm: AsyncContextManager[WritableStream], name: str):
super().__init__()
self.cm = cm
self.aws = async_to_blocking(self.cm.__aenter__())
self._mode = 'wb'
self._name = name
@property
def mode(self):
return self._mode
@property
def name(self):
return self._name
def close(self):
self.aws.close()
        async_to_blocking(self.cm.__aexit__(None, None, None))
@property
def closed(self) -> bool:
return self.aws.closed
def fileno(self) -> int:
raise OSError
def flush(self):
pass
def isatty(self):
return False
def readable(self):
return False
def readline(self, size=-1):
raise OSError
def readlines(self, hint=-1):
raise OSError
def seek(self, offset, whence=None):
raise OSError
def seekable(self):
return False
def tell(self):
raise io.UnsupportedOperation
def truncate(self):
raise io.UnsupportedOperation
def writable(self):
return True
def read(self, size=-1):
raise OSError
def readall(self):
raise OSError
def readinto(self, b):
raise OSError
def write(self, b):
return async_to_blocking(self.aws.write(b))
def _stat_result(is_dir: bool, size_bytes: int, path: str) -> StatResult:
return StatResult(
path=path.rstrip('/'),
size=size_bytes,
typ=FileType.DIRECTORY if is_dir else FileType.FILE,
owner=None,
modification_time=None)
class RouterFS(FS):
def __init__(self, afs: RouterAsyncFS):
nest_asyncio.apply()
self.afs = afs
def open(self, path: str, mode: str = 'r', buffer_size: int = 8192) -> io.IOBase:
del buffer_size
if mode not in ('r', 'rb', 'w', 'wb'):
raise ValueError(f'Unsupported mode: {repr(mode)}')
strm: io.IOBase
if mode[0] == 'r':
strm = SyncReadableStream(async_to_blocking(self.afs.open(path)), path)
else:
assert mode[0] == 'w'
strm = SyncWritableStream(async_to_blocking(self.afs.create(path)), path)
if 'b' not in mode:
strm = io.TextIOWrapper(strm, encoding='utf-8') # type: ignore # typeshed is wrong, this *is* an IOBase
return strm
def copy(self, src: str, dest: str, *, max_simultaneous_transfers=75):
transfer = Transfer(src, dest)
async def _copy():
sema = asyncio.Semaphore(max_simultaneous_transfers)
await Copier.copy(self.afs, sema, transfer)
return async_to_blocking(_copy())
def exists(self, path: str) -> bool:
async def _exists():
dir_path = path
if dir_path[-1] != '/':
dir_path = dir_path + '/'
return any(await asyncio.gather(
self.afs.isfile(path),
self.afs.isdir(dir_path)))
return async_to_blocking(_exists())
def is_file(self, path: str) -> bool:
return async_to_blocking(self.afs.isfile(path))
async def _async_is_dir(self, path: str) -> bool:
if path[-1] != '/':
path = path + '/'
return await self.afs.isdir(path)
def is_dir(self, path: str) -> bool:
return async_to_blocking(self._async_is_dir(path))
def stat(self, path: str) -> StatResult:
size_bytes, is_dir = async_to_blocking(asyncio.gather(
self._size_bytes_or_none(path), self._async_is_dir(path)))
if size_bytes is None:
if not is_dir:
raise FileNotFoundError(path)
return _stat_result(True, 0, path)
return _stat_result(is_dir, size_bytes, path)
async def _size_bytes_or_none(self, path: str):
try:
return await (await self.afs.statfile(path)).size()
except FileNotFoundError:
return None
async def _fle_to_dict(self, fle: FileListEntry) -> StatResult:
async def size():
try:
return await (await fle.status()).size()
except IsADirectoryError:
return 0
return _stat_result(
*await asyncio.gather(fle.is_dir(), size(), fle.url()))
def ls(self,
path: str,
*,
error_when_file_and_directory: bool = True,
_max_simultaneous_files: int = 50) -> List[StatResult]:
return async_to_blocking(self._async_ls(
path,
error_when_file_and_directory=error_when_file_and_directory,
_max_simultaneous_files=_max_simultaneous_files))
async def _async_ls(self,
path: str,
*,
error_when_file_and_directory: bool = True,
_max_simultaneous_files: int = 50) -> List[StatResult]:
async def ls_no_glob(path) -> List[StatResult]:
return await self._ls_no_glob(path,
error_when_file_and_directory=error_when_file_and_directory,
_max_simultaneous_files=_max_simultaneous_files)
url = self.afs.parse_url(path)
if any(glob.escape(bucket_part) != bucket_part
for bucket_part in url.bucket_parts):
raise ValueError(f'glob pattern only allowed in path (e.g. not in bucket): {path}')
blobpath = url.path
if isinstance(url, LocalAsyncFSURL) and blobpath[0] != '/':
blobpath = './' + blobpath
components = blobpath.split('/')
assert len(components) > 0
glob_components = []
running_prefix = []
for component in components:
_raise_for_incomplete_glob_group(component, path)
if glob.escape(component) == component:
running_prefix.append(component)
else:
glob_components.append((running_prefix, component))
running_prefix = []
suffix_components: List[str] = running_prefix
if len(url.bucket_parts) > 0:
first_prefix = '/'.join([url.scheme + ':', '', *url.bucket_parts])
else:
first_prefix = url.scheme + ':'
cumulative_prefixes = [first_prefix]
for intervening_components, single_component_glob_pattern in glob_components:
cumulative_prefixes = [
stat.path
for cumulative_prefix in cumulative_prefixes
for stat in await ls_no_glob('/'.join([cumulative_prefix, *intervening_components]))
if fnmatch.fnmatch(stat.path,
'/'.join([cumulative_prefix, *intervening_components, single_component_glob_pattern]))
]
return [stat
for cumulative_prefix in cumulative_prefixes
for stat in await ls_no_glob('/'.join([cumulative_prefix, *suffix_components]))]
async def _ls_no_glob(self,
path: str,
*,
error_when_file_and_directory: bool = True,
_max_simultaneous_files: int = 50) -> List[StatResult]:
async def ls_as_dir() -> Optional[List[StatResult]]:
try:
return await bounded_gather(
*[functools.partial(self._fle_to_dict, fle)
async for fle in await self.afs.listfiles(path)],
parallelism=_max_simultaneous_files)
except (FileNotFoundError, NotADirectoryError):
return None
maybe_size, maybe_contents = await asyncio.gather(
self._size_bytes_or_none(path), ls_as_dir())
if maybe_size is not None:
file_stat = _stat_result(False, maybe_size, path)
if maybe_contents is not None:
if error_when_file_and_directory:
raise ValueError(f'{path} is both a file and a directory')
return [file_stat, *maybe_contents]
return [file_stat]
if maybe_contents is None:
raise FileNotFoundError(path)
return maybe_contents
def mkdir(self, path: str):
return async_to_blocking(self.afs.mkdir(path))
def remove(self, path: str):
return async_to_blocking(self.afs.remove(path))
def rmtree(self, path: str):
return async_to_blocking(self.afs.rmtree(None, path))
def supports_scheme(self, scheme: str) -> bool:
return scheme in self.afs.schemes
def canonicalize_path(self, path: str) -> str:
if isinstance(self.afs._get_fs(path), LocalAsyncFS):
if path.startswith('file:'):
return 'file:' + os.path.realpath(path[5:])
return 'file:' + os.path.realpath(path)
return path
def _raise_for_incomplete_glob_group(component: str, full_path: str):
i = 0
n = len(component)
open_group = False
while i < n:
c = component[i]
if c == '[':
open_group = True
if c == ']':
open_group = False
i += 1
if open_group:
raise ValueError(f'glob groups must not include forward slashes: {component} {full_path}')
```
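To make the glob handling in `RouterFS._async_ls` concrete, the sketch below reproduces in isolation how a blob path is decomposed into literal prefix segments and single-component glob patterns; it uses only the standard library and a hypothetical path, not the `RouterAsyncFS` machinery:
```python
import glob

def split_glob_components(blobpath: str):
    """Split a path into [(literal_prefix, glob_pattern), ...] plus a trailing literal
    suffix, mirroring the decomposition performed by RouterFS._async_ls."""
    components = blobpath.split('/')
    glob_components = []
    running_prefix = []
    for component in components:
        if glob.escape(component) == component:
            # No glob metacharacters: accumulate as a literal path segment.
            running_prefix.append(component)
        else:
            # Glob pattern: literals seen since the last pattern become its prefix.
            glob_components.append((running_prefix, component))
            running_prefix = []
    return glob_components, running_prefix

print(split_glob_components('data/b*/c/?.txt'))
# ([(['data'], 'b*'), (['c'], '?.txt')], [])
```
Each `(prefix, pattern)` pair is then expanded by listing the literal prefix and filtering the results with `fnmatch`, which is why a glob group cannot span a `/`.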
#### File: hail/ir/ir.py
```python
import copy
from collections import defaultdict
import decorator
import hail
from hail.expr.types import dtype, HailType, hail_type, tint32, tint64, \
tfloat32, tfloat64, tstr, tbool, tarray, tstream, tndarray, tset, tdict, \
tstruct, ttuple, tinterval, tvoid
from hail.ir.blockmatrix_writer import BlockMatrixWriter, BlockMatrixMultiWriter
from hail.typecheck import typecheck, typecheck_method, sequenceof, numeric, \
sized_tupleof, nullable, tupleof, anytype, func_spec
from hail.utils.java import Env, HailUserError, warning
from hail.utils.misc import escape_str, dump_json, parsable_strings, escape_id
from .base_ir import BaseIR, IR, TableIR, MatrixIR, BlockMatrixIR, _env_bind
from .matrix_writer import MatrixWriter, MatrixNativeMultiWriter
from .renderer import Renderer, Renderable, ParensRenderer
from .table_writer import TableWriter
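# The IR node classes below share a common protocol: copy(...) rebuilds the node from
# (possibly new) child IRs, head_str() renders the node's non-child arguments for the
# textual IR, _eq(other) compares those non-child arguments where present, and
# _compute_type(env, agg_env) computes and caches the node's Hail type.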
class I32(IR):
@typecheck_method(x=int)
def __init__(self, x):
super().__init__()
self.x = x
def _eq(self, other):
return self.x == other.x
def copy(self):
return I32(self.x)
def head_str(self):
return self.x
def _compute_type(self, env, agg_env):
self._type = tint32
class I64(IR):
@typecheck_method(x=int)
def __init__(self, x):
super().__init__()
self.x = x
def _eq(self, other):
return self.x == other.x
def copy(self):
return I64(self.x)
def head_str(self):
return self.x
def _compute_type(self, env, agg_env):
self._type = tint64
class F32(IR):
@typecheck_method(x=numeric)
def __init__(self, x):
super().__init__()
self.x = x
def _eq(self, other):
return self.x == other.x
def copy(self):
return F32(self.x)
def head_str(self):
return self.x
def _compute_type(self, env, agg_env):
self._type = tfloat32
class F64(IR):
@typecheck_method(x=numeric)
def __init__(self, x):
super().__init__()
self.x = x
def _eq(self, other):
return self.x == other.x
def copy(self):
return F64(self.x)
def head_str(self):
return self.x
def _compute_type(self, env, agg_env):
self._type = tfloat64
class Str(IR):
@typecheck_method(x=str)
def __init__(self, x):
super().__init__()
self.x = x
def _eq(self, other):
return self.x == other.x
def copy(self):
return Str(self.x)
def head_str(self):
return f'"{escape_str(self.x)}"'
def _compute_type(self, env, agg_env):
self._type = tstr
class FalseIR(IR):
def __init__(self):
super().__init__()
def copy(self):
return FalseIR()
def _ir_name(self):
return 'False'
def _compute_type(self, env, agg_env):
self._type = tbool
class TrueIR(IR):
def __init__(self):
super().__init__()
def copy(self):
return TrueIR()
def _ir_name(self):
return 'True'
def _compute_type(self, env, agg_env):
self._type = tbool
class Void(IR):
def __init__(self):
super().__init__()
def copy(self):
return Void()
def _compute_type(self, env, agg_env):
self._type = tvoid
class Cast(IR):
@typecheck_method(v=IR, typ=hail_type)
def __init__(self, v, typ):
super().__init__(v)
self.v = v
self._typ = typ
@property
def typ(self):
return self._typ
def _eq(self, other):
return self._typ == other._typ
@typecheck_method(v=IR)
def copy(self, v):
return Cast(v, self.typ)
def head_str(self):
return self._typ._parsable_string()
def _compute_type(self, env, agg_env):
self.v._compute_type(env, agg_env)
self._type = self._typ
class NA(IR):
@typecheck_method(typ=hail_type)
def __init__(self, typ):
super().__init__()
self._typ = typ
@property
def typ(self):
return self._typ
def _eq(self, other):
return self._typ == other._typ
def copy(self):
return NA(self._typ)
def head_str(self):
return self._typ._parsable_string()
def _compute_type(self, env, agg_env):
self._type = self._typ
class IsNA(IR):
@typecheck_method(value=IR)
def __init__(self, value):
super().__init__(value)
self.value = value
@typecheck_method(value=IR)
def copy(self, value):
return IsNA(value)
def _compute_type(self, env, agg_env):
self.value._compute_type(env, agg_env)
self._type = tbool
class If(IR):
@typecheck_method(cond=IR, cnsq=IR, altr=IR)
def __init__(self, cond, cnsq, altr):
super().__init__(cond, cnsq, altr)
self.cond = cond
self.cnsq = cnsq
self.altr = altr
@typecheck_method(cond=IR, cnsq=IR, altr=IR)
def copy(self, cond, cnsq, altr):
return If(cond, cnsq, altr)
def _compute_type(self, env, agg_env):
self.cond._compute_type(env, agg_env)
self.cnsq._compute_type(env, agg_env)
self.altr._compute_type(env, agg_env)
assert (self.cnsq.typ == self.altr.typ)
self._type = self.cnsq.typ
def renderable_new_block(self, i):
return i == 1 or i == 2
class Coalesce(IR):
@typecheck_method(values=IR)
def __init__(self, *values):
super().__init__(*values)
self.values = values
@typecheck_method(values=IR)
def copy(self, *values):
return Coalesce(*values)
def _compute_type(self, env, agg_env):
first, *rest = self.values
first._compute_type(env, agg_env)
for x in rest:
x._compute_type(env, agg_env)
assert x.typ == first.typ
self._type = first.typ
class Let(IR):
@typecheck_method(name=str, value=IR, body=IR)
def __init__(self, name, value, body):
super().__init__(value, body)
self.name = name
self.value = value
self.body = body
@typecheck_method(value=IR, body=IR)
def copy(self, value, body):
return Let(self.name, value, body)
def head_str(self):
return escape_id(self.name)
@property
def bound_variables(self):
return {self.name} | super().bound_variables
def _eq(self, other):
return other.name == self.name
def _compute_type(self, env, agg_env):
self.value._compute_type(env, agg_env)
self.body._compute_type(_env_bind(env, self.bindings(1)), agg_env)
self._type = self.body._type
def renderable_bindings(self, i, default_value=None):
if i == 1:
if default_value is None:
value = self.value._type
else:
value = default_value
return {self.name: value}
else:
return {}
class AggLet(IR):
@typecheck_method(name=str, value=IR, body=IR, is_scan=bool)
def __init__(self, name, value, body, is_scan):
super().__init__(value, body)
self.name = name
self.value = value
self.body = body
self.is_scan = is_scan
@typecheck_method(value=IR, body=IR)
def copy(self, value, body):
return AggLet(self.name, value, body, self.is_scan)
def head_str(self):
return escape_id(self.name) + " " + str(self.is_scan)
@property
def bound_variables(self):
return {self.name} | super().bound_variables
def _eq(self, other):
return other.name == self.name and other.is_scan == self.is_scan
def _compute_type(self, env, agg_env):
self.value._compute_type(agg_env, None)
self.body._compute_type(env, _env_bind(agg_env, {self.name: self.value._type}))
self._type = self.body._type
def renderable_agg_bindings(self, i, default_value=None):
if not self.is_scan and i == 1:
if default_value is None:
value = self.value._type
else:
value = default_value
return {self.name: value}
else:
return {}
def renderable_scan_bindings(self, i, default_value=None):
if self.is_scan and i == 1:
if default_value is None:
value = self.value._type
else:
value = default_value
return {self.name: value}
else:
return {}
def renderable_uses_agg_context(self, i: int) -> bool:
return not self.is_scan and i == 0
def renderable_uses_scan_context(self, i: int) -> bool:
return self.is_scan and i == 0
class Ref(IR):
@typecheck_method(name=str)
def __init__(self, name):
super().__init__()
self.name = name
self._free_vars = {name}
def copy(self):
return Ref(self.name)
def head_str(self):
return escape_id(self.name)
def _eq(self, other):
return other.name == self.name
def _compute_type(self, env, agg_env):
self._type = env[self.name]
class TopLevelReference(Ref):
@typecheck_method(name=str)
def __init__(self, name):
super().__init__(name)
@property
def is_nested_field(self):
return True
def copy(self):
return TopLevelReference(self.name)
def _ir_name(self):
return 'Ref'
def _compute_type(self, env, agg_env):
assert self.name in env, f'{self.name} not found in {env}'
self._type = env[self.name]
class TailLoop(IR):
@typecheck_method(name=str, body=IR, params=sequenceof(sized_tupleof(str, IR)))
def __init__(self, name, body, params):
super().__init__(*([v for n, v in params] + [body]))
self.name = name
self.params = params
self.body = body
def copy(self, *children):
params = children[:-1]
body = children[-1]
assert len(params) == len(self.params)
        return TailLoop(self.name, body, [(n, v) for (n, _), v in zip(self.params, params)])
def head_str(self):
return f'{escape_id(self.name)} ({" ".join([escape_id(n) for n, _ in self.params])})'
def _eq(self, other):
return self.name == other.name
@property
def bound_variables(self):
return {n for n, _ in self.params} | {self.name} | super().bound_variables
def _compute_type(self, env, agg_env):
for _, b in self.params:
b._compute_type(env, agg_env)
self.body._compute_type(_env_bind(env, self.bindings(len(self.params))), agg_env)
self._type = self.body.typ
def renderable_bindings(self, i, default_value=None):
if i == len(self.params):
if default_value is None:
return {self.name: None, **{n: v.typ for n, v in self.params}}
else:
value = default_value
return {self.name: value, **{n: value for n, _ in self.params}}
else:
return {}
class Recur(IR):
@typecheck_method(name=str, args=sequenceof(IR), return_type=hail_type)
def __init__(self, name, args, return_type):
super().__init__(*args)
self.name = name
self.args = args
self.return_type = return_type
self._free_vars = {name}
def copy(self, args):
return Recur(self.name, args, self.return_type)
def head_str(self):
return f'{escape_id(self.name)} {self.return_type._parsable_string()}'
def _eq(self, other):
return other.name == self.name
def _compute_type(self, env, agg_env):
assert self.name in env
self._type = self.return_type
class ApplyBinaryPrimOp(IR):
@typecheck_method(op=str, left=IR, right=IR)
def __init__(self, op, left, right):
super().__init__(left, right)
self.op = op
self.left = left
self.right = right
@typecheck_method(left=IR, right=IR)
def copy(self, left, right):
return ApplyBinaryPrimOp(self.op, left, right)
def head_str(self):
return escape_id(self.op)
def _eq(self, other):
return other.op == self.op
def _compute_type(self, env, agg_env):
self.left._compute_type(env, agg_env)
self.right._compute_type(env, agg_env)
if self.op == '/':
int_types = [tint32, tint64]
if self.left.typ in int_types and self.right.typ in int_types:
self._type = tfloat64
elif self.left.typ == tfloat64:
self._type = tfloat64
else:
self._type = tfloat32
else:
self._type = self.left.typ
class ApplyUnaryPrimOp(IR):
@typecheck_method(op=str, x=IR)
def __init__(self, op, x):
super().__init__(x)
self.op = op
self.x = x
@typecheck_method(x=IR)
def copy(self, x):
return ApplyUnaryPrimOp(self.op, x)
def head_str(self):
return escape_id(self.op)
def _eq(self, other):
return other.op == self.op
def _compute_type(self, env, agg_env):
self.x._compute_type(env, agg_env)
self._type = self.x.typ
class ApplyComparisonOp(IR):
@typecheck_method(op=str, left=IR, right=IR)
def __init__(self, op, left, right):
super().__init__(left, right)
self.op = op
self.left = left
self.right = right
@typecheck_method(left=IR, right=IR)
def copy(self, left, right):
return ApplyComparisonOp(self.op, left, right)
def head_str(self):
return escape_id(self.op)
def _eq(self, other):
return other.op == self.op
def _compute_type(self, env, agg_env):
self.left._compute_type(env, agg_env)
self.right._compute_type(env, agg_env)
self._type = tbool
class MakeArray(IR):
@typecheck_method(args=sequenceof(IR), type=nullable(hail_type))
def __init__(self, args, type):
super().__init__(*args)
self.args = args
self._type = type
def copy(self, *args):
return MakeArray(args, self._type)
def head_str(self):
return self._type._parsable_string() if self._type is not None else 'None'
def _eq(self, other):
return other._type == self._type
def _compute_type(self, env, agg_env):
for a in self.args:
a._compute_type(env, agg_env)
if self._type is None:
self._type = tarray(self.args[0].typ)
class ArrayRef(IR):
@typecheck_method(a=IR, i=IR, error_id=nullable(int), stack_trace=nullable(str))
def __init__(self, a, i, error_id=None, stack_trace=None):
super().__init__(a, i)
self.a = a
self.i = i
self._error_id = error_id
self._stack_trace = stack_trace
if error_id is None or stack_trace is None:
self.save_error_info()
@typecheck_method(a=IR, i=IR)
def copy(self, a, i):
return ArrayRef(a, i, self._error_id, self._stack_trace)
def head_str(self):
return str(self._error_id)
def _compute_type(self, env, agg_env):
self.a._compute_type(env, agg_env)
self.i._compute_type(env, agg_env)
self._type = self.a.typ.element_type
class ArraySlice(IR):
@typecheck_method(a=IR, start=IR, stop=nullable(IR), step=IR, error_id=nullable(int), stack_trace=nullable(str))
def __init__(self, a, start, stop, step, error_id=None, stack_trace=None):
if stop is not None:
super().__init__(a, start, stop, step)
else:
super().__init__(a, start, step)
self.a = a
self.start = start
self.stop = stop
self.step = step
self._error_id = error_id
self._stack_trace = stack_trace
if error_id is None or stack_trace is None:
self.save_error_info()
@typecheck_method(a=IR, start=IR, stop=nullable(IR), step=IR)
def copy(self, a, start, stop, step):
return ArraySlice(a, start, stop, step, self._error_id, self._stack_trace)
def head_str(self):
return str(self._error_id)
def _compute_type(self, env, agg_env):
self.a._compute_type(env, agg_env)
self.start._compute_type(env, agg_env)
if self.stop is not None:
self.stop._compute_type(env, agg_env)
self.step._compute_type(env, agg_env)
self._type = self.a.typ
class ArrayLen(IR):
@typecheck_method(a=IR)
def __init__(self, a):
super().__init__(a)
self.a = a
@typecheck_method(a=IR)
def copy(self, a):
return ArrayLen(a)
def _compute_type(self, env, agg_env):
self.a._compute_type(env, agg_env)
self._type = tint32
class ArrayZeros(IR):
@typecheck_method(length=IR)
def __init__(self, length):
super().__init__(length)
self.length = length
@typecheck_method(length=IR)
def copy(self, length):
return ArrayZeros(length)
def _compute_type(self, env, agg_env):
self.length._compute_type(env, agg_env)
self._type = tarray(tint32)
class StreamIota(IR):
@typecheck_method(start=IR, step=IR, requires_memory_management_per_element=bool)
def __init__(self, start, step, requires_memory_management_per_element=False):
super().__init__(start, step)
self.start = start
self.step = step
self.requires_memory_management_per_element = requires_memory_management_per_element
@typecheck_method(start=IR, step=IR)
def copy(self, start, step):
return StreamIota(start, step,
requires_memory_management_per_element=self.requires_memory_management_per_element)
def head_str(self):
return f'{self.requires_memory_management_per_element}'
def _compute_type(self, env, agg_env):
self.start._compute_type(env, agg_env)
self.step._compute_type(env, agg_env)
self._type = tstream(tint32)
class StreamRange(IR):
@typecheck_method(start=IR, stop=IR, step=IR, requires_memory_management_per_element=bool,
error_id=nullable(int), stack_trace=nullable(str))
def __init__(self, start, stop, step, requires_memory_management_per_element=False,
error_id=None, stack_trace=None):
super().__init__(start, stop, step)
self.start = start
self.stop = stop
self.step = step
self.requires_memory_management_per_element = requires_memory_management_per_element
self._error_id = error_id
self._stack_trace = stack_trace
if error_id is None or stack_trace is None:
self.save_error_info()
@typecheck_method(start=IR, stop=IR, step=IR)
def copy(self, start, stop, step):
return StreamRange(start, stop, step, error_id=self._error_id, stack_trace=self._stack_trace)
def head_str(self):
return f'{self._error_id} {self.requires_memory_management_per_element}'
def _compute_type(self, env, agg_env):
self.start._compute_type(env, agg_env)
self.stop._compute_type(env, agg_env)
self.step._compute_type(env, agg_env)
self._type = tstream(tint32)
class StreamGrouped(IR):
@typecheck_method(stream=IR, group_size=IR)
def __init__(self, stream, group_size):
super().__init__(stream, group_size)
self.stream = stream
self.group_size = group_size
@typecheck_method(stream=IR, group_size=IR)
    def copy(self, stream, group_size):
return StreamGrouped(stream, group_size)
def _compute_type(self, env, agg_env):
self.stream._compute_type(env, agg_env)
self._type = tstream(self.stream._type)
class MakeNDArray(IR):
@typecheck_method(data=IR, shape=IR, row_major=IR, error_id=nullable(int), stack_trace=nullable(str))
def __init__(self, data, shape, row_major, error_id=None, stack_trace=None):
super().__init__(data, shape, row_major)
self.data = data
self.shape = shape
self.row_major = row_major
self._error_id = error_id
self._stack_trace = stack_trace
if error_id is None or stack_trace is None:
self.save_error_info()
@typecheck_method(data=IR, shape=IR, row_major=IR)
def copy(self, data, shape, row_major):
return MakeNDArray(data, shape, row_major, self._error_id, self._stack_trace)
def _compute_type(self, env, agg_env):
self.data._compute_type(env, agg_env)
self.shape._compute_type(env, agg_env)
self.row_major._compute_type(env, agg_env)
self._type = tndarray(self.data.typ.element_type, len(self.shape.typ))
def head_str(self):
return f'{self._error_id}'
class NDArrayShape(IR):
@typecheck_method(nd=IR)
def __init__(self, nd):
super().__init__(nd)
self.nd = nd
@typecheck_method(nd=IR)
def copy(self, nd):
return NDArrayShape(nd)
def _compute_type(self, env, agg_env):
self.nd._compute_type(env, agg_env)
self._type = ttuple(*[tint64 for _ in range(self.nd.typ.ndim)])
class NDArrayReshape(IR):
@typecheck_method(nd=IR, shape=IR, error_id=nullable(int), stack_trace=nullable(str))
def __init__(self, nd, shape, error_id=None, stack_trace=None):
super().__init__(nd, shape)
self.nd = nd
self.shape = shape
self._error_id = error_id
self._stack_trace = stack_trace
if error_id is None or stack_trace is None:
self.save_error_info()
def copy(self, nd, shape):
return NDArrayReshape(nd, shape, self._error_id, self._stack_trace)
def head_str(self):
return str(self._error_id)
def _compute_type(self, env, agg_env):
self.nd._compute_type(env, agg_env)
self.shape._compute_type(env, agg_env)
self._type = tndarray(self.nd.typ.element_type, len(self.shape.typ))
class NDArrayMap(IR):
@typecheck_method(nd=IR, name=str, body=IR)
def __init__(self, nd, name, body):
super().__init__(nd, body)
self.nd = nd
self.name = name
self.body = body
@typecheck_method(nd=IR, body=IR)
def copy(self, nd, body):
return NDArrayMap(nd, self.name, body)
def head_str(self):
return escape_id(self.name)
def _eq(self, other):
return self.name == other.name
@property
def bound_variables(self):
return {self.name} | super().bound_variables
def _compute_type(self, env, agg_env):
self.nd._compute_type(env, agg_env)
self.body._compute_type(_env_bind(env, self.bindings(1)), agg_env)
self._type = tndarray(self.body.typ, self.nd.typ.ndim)
def renderable_bindings(self, i, default_value=None):
if i == 1:
if default_value is None:
value = self.nd.typ.element_type
else:
value = default_value
return {self.name: value}
else:
return {}
class NDArrayMap2(IR):
@typecheck_method(left=IR, right=IR, lname=str, rname=str, body=IR, error_id=nullable(int), stack_trace=nullable(str))
def __init__(self, left, right, lname, rname, body, error_id=None, stack_trace=None):
super().__init__(left, right, body)
self.right = right
self.left = left
self.lname = lname
self.rname = rname
self.body = body
self._error_id = error_id
self._stack_trace = stack_trace
if error_id is None or stack_trace is None:
self.save_error_info()
@typecheck_method(l=IR, r=IR, body=IR)
def copy(self, left, right, body):
return NDArrayMap2(left, right, self.lname, self.rname, body, self._error_id, self._stack_trace)
def head_str(self):
return f'{self._error_id} {escape_id(self.lname)} {escape_id(self.rname)}'
def _eq(self, other):
return self.lname == other.lname and \
self.rname == other.rname
@property
def bound_variables(self):
return {self.lname, self.rname} | super().bound_variables
def _compute_type(self, env, agg_env):
self.left._compute_type(env, agg_env)
self.right._compute_type(env, agg_env)
self.body._compute_type(_env_bind(env, self.bindings(2)), agg_env)
self._type = tndarray(self.body.typ, self.left.typ.ndim)
def renderable_bindings(self, i, default_value=None):
if i == 2:
if default_value is None:
return {self.lname: self.left.typ.element_type, self.rname: self.right.typ.element_type}
else:
return {self.lname: default_value, self.rname: default_value}
else:
return {}
class NDArrayRef(IR):
@typecheck_method(nd=IR, idxs=sequenceof(IR), error_id=nullable(int), stack_trace=nullable(str))
def __init__(self, nd, idxs, error_id=None, stack_trace=None):
super().__init__(nd, *idxs)
self.nd = nd
self.idxs = idxs
self._error_id = error_id
self._stack_trace = stack_trace
if error_id is None or stack_trace is None:
self.save_error_info()
def copy(self, *args):
return NDArrayRef(args[0], args[1:], self._error_id, self._stack_trace)
def head_str(self):
return str(self._error_id)
def _compute_type(self, env, agg_env):
self.nd._compute_type(env, agg_env)
[idx._compute_type(env, agg_env) for idx in self.idxs]
self._type = self.nd.typ.element_type
class NDArraySlice(IR):
@typecheck_method(nd=IR, slices=IR)
def __init__(self, nd, slices):
super().__init__(nd, slices)
self.nd = nd
self.slices = slices
def copy(self, nd, slices):
return NDArraySlice(nd, slices)
def _compute_type(self, env, agg_env):
self.nd._compute_type(env, agg_env)
self.slices._compute_type(env, agg_env)
self._type = tndarray(self.nd.typ.element_type,
len([t for t in self.slices.typ.types if isinstance(t, ttuple)]))
class NDArrayReindex(IR):
@typecheck_method(nd=IR, idx_expr=sequenceof(int))
def __init__(self, nd, idx_expr):
super().__init__(nd)
self.nd = nd
self.idx_expr = idx_expr
@typecheck_method(nd=IR)
def copy(self, nd):
return NDArrayReindex(nd, self.idx_expr)
def head_str(self):
return f'({" ".join([str(i) for i in self.idx_expr])})'
def _compute_type(self, env, agg_env):
self.nd._compute_type(env, agg_env)
n_input_dims = self.nd.typ.ndim
n_output_dims = len(self.idx_expr)
assert n_input_dims <= n_output_dims
assert all([i < n_output_dims for i in self.idx_expr])
assert all([i in self.idx_expr for i in range(n_output_dims)])
self._type = tndarray(self.nd.typ.element_type, n_output_dims)
class NDArrayAgg(IR):
@typecheck_method(nd=IR, axes=sequenceof(int))
def __init__(self, nd, axes):
super().__init__(nd)
self.nd = nd
self.axes = axes
@typecheck_method(nd=IR)
def copy(self, nd):
return NDArrayAgg(nd, self.axes)
def head_str(self):
return f'({" ".join([str(i) for i in self.axes])})'
def _compute_type(self, env, agg_env):
self.nd._compute_type(env, agg_env)
assert len(set(self.axes)) == len(self.axes)
assert all([axis < self.nd.typ.ndim for axis in self.axes])
self._type = tndarray(self.nd.typ.element_type, self.nd.typ.ndim - len(self.axes))
class NDArrayMatMul(IR):
@typecheck_method(left=IR, right=IR, error_id=nullable(int), stack_trace=nullable(str))
def __init__(self, left, right, error_id=None, stack_trace=None):
super().__init__(left, right)
self.left = left
self.right = right
self._error_id = error_id
self._stack_trace = stack_trace
if error_id is None or stack_trace is None:
self.save_error_info()
@typecheck_method(left=IR, right=IR)
def copy(self, left, right):
return NDArrayMatMul(left, right, self._error_id, self._stack_trace)
def head_str(self):
return str(self._error_id)
def _compute_type(self, env, agg_env):
self.left._compute_type(env, agg_env)
self.right._compute_type(env, agg_env)
ndim = hail.linalg.utils.misc._ndarray_matmul_ndim(self.left.typ.ndim, self.right.typ.ndim)
from hail.expr.expressions import unify_types
self._type = tndarray(unify_types(self.left.typ.element_type,
self.right.typ.element_type), ndim)
class NDArrayQR(IR):
@typecheck_method(nd=IR, mode=str, error_id=nullable(int), stack_trace=nullable(str))
def __init__(self, nd, mode, error_id=None, stack_trace=None):
super().__init__(nd)
self.nd = nd
self.mode = mode
self._error_id = error_id
self._stack_trace = stack_trace
if error_id is None or stack_trace is None:
self.save_error_info()
def copy(self):
return NDArrayQR(self.nd, self.mode, self._error_id, self._stack_trace)
def head_str(self):
return f'{self._error_id} "{self.mode}"'
def _compute_type(self, env, agg_env):
self.nd._compute_type(env, agg_env)
if self.mode in ["complete", "reduced"]:
self._type = ttuple(tndarray(tfloat64, 2), tndarray(tfloat64, 2))
elif self.mode == "raw":
self._type = ttuple(tndarray(tfloat64, 2), tndarray(tfloat64, 1))
elif self.mode == "r":
self._type = tndarray(tfloat64, 2)
else:
raise ValueError("Cannot compute type for mode: " + self.mode)
class NDArraySVD(IR):
@typecheck_method(nd=IR, full_matrices=bool, compute_uv=bool, error_id=nullable(int), stack_trace=nullable(str))
def __init__(self, nd, full_matrices, compute_uv, error_id=None, stack_trace=None):
super().__init__(nd)
self.nd = nd
self.full_matrices = full_matrices
self.compute_uv = compute_uv
self._error_id = error_id
self._stack_trace = stack_trace
if error_id is None or stack_trace is None:
self.save_error_info()
def copy(self):
return NDArraySVD(self.nd, self.full_matrices, self.compute_uv, self._error_id, self._stack_trace)
def head_str(self):
return f'{self._error_id} {self.full_matrices} {self.compute_uv}'
def _compute_type(self, env, agg_env):
self.nd._compute_type(env, agg_env)
if self.compute_uv:
self._type = ttuple(tndarray(tfloat64, 2), tndarray(tfloat64, 1), tndarray(tfloat64, 2))
else:
self._type = tndarray(tfloat64, 1)
class NDArrayInv(IR):
@typecheck_method(nd=IR, error_id=nullable(int), stack_trace=nullable(str))
def __init__(self, nd, error_id=None, stack_trace=None):
super().__init__(nd)
self.nd = nd
self._error_id = error_id
self._stack_trace = stack_trace
if error_id is None or stack_trace is None:
self.save_error_info()
def copy(self):
return NDArrayInv(self.nd, self._error_id, self._stack_trace)
def head_str(self):
return str(self._error_id)
def _compute_type(self, env, agg_env):
self.nd._compute_type(env, agg_env)
self._type = tndarray(tfloat64, 2)
class NDArrayConcat(IR):
@typecheck_method(nds=IR, axis=int)
def __init__(self, nds, axis):
super().__init__(nds)
self.nds = nds
self.axis = axis
def copy(self):
return NDArrayConcat(self.nds, self.axis)
def head_str(self):
return self.axis
def _eq(self, other):
return other.nds == self.nds and \
other.axis == self.axis
def _compute_type(self, env, agg_env):
self.nds._compute_type(env, agg_env)
self._type = self.nds.typ.element_type
class NDArrayWrite(IR):
@typecheck_method(nd=IR, path=IR)
def __init__(self, nd, path):
super().__init__(nd, path)
self.nd = nd
self.path = path
@typecheck_method(nd=IR, path=IR)
def copy(self, nd, path):
return NDArrayWrite(nd, path)
def _compute_type(self, env, agg_env):
self.nd._compute_type(env, agg_env)
self.path._compute_type(env, agg_env)
self._type = tvoid
@staticmethod
def is_effectful() -> bool:
return True
class ArraySort(IR):
@typecheck_method(a=IR, l_name=str, r_name=str, compare=IR)
def __init__(self, a, l_name, r_name, compare):
super().__init__(a, compare)
self.a = a
self.l_name = l_name
self.r_name = r_name
self.compare = compare
@typecheck_method(a=IR, compare=IR)
def copy(self, a, compare):
return ArraySort(a, self.l_name, self.r_name, compare)
def head_str(self):
return f'{escape_id(self.l_name)} {escape_id(self.r_name)}'
@property
def bound_variables(self):
return {self.l_name, self.r_name} | super().bound_variables
def _eq(self, other):
return other.l_name == self.l_name and other.r_name == self.r_name
def _compute_type(self, env, agg_env):
self.a._compute_type(env, agg_env)
self._type = tarray(self.a.typ.element_type)
def renderable_bindings(self, i, default_value=None):
if i == 1:
if default_value is None:
value = self.a.typ.element_type
else:
value = default_value
return {self.l_name: value, self.r_name: value}
else:
return {}
class ToSet(IR):
@typecheck_method(a=IR)
def __init__(self, a):
super().__init__(a)
self.a = a
@typecheck_method(a=IR)
def copy(self, a):
return ToSet(a)
def _compute_type(self, env, agg_env):
self.a._compute_type(env, agg_env)
self._type = tset(self.a.typ.element_type)
class ToDict(IR):
@typecheck_method(a=IR)
def __init__(self, a):
super().__init__(a)
self.a = a
@typecheck_method(a=IR)
def copy(self, a):
return ToDict(a)
def _compute_type(self, env, agg_env):
self.a._compute_type(env, agg_env)
self._type = tdict(self.a.typ['key'], self.a.typ['value'])
class ToArray(IR):
@typecheck_method(a=IR)
def __init__(self, a):
super().__init__(a)
self.a = a
@typecheck_method(a=IR)
def copy(self, a):
return ToArray(a)
def _compute_type(self, env, agg_env):
self.a._compute_type(env, agg_env)
self._type = tarray(self.a.typ.element_type)
class CastToArray(IR):
@typecheck_method(a=IR)
def __init__(self, a):
super().__init__(a)
self.a = a
@typecheck_method(a=IR)
def copy(self, a):
return CastToArray(a)
def _compute_type(self, env, agg_env):
self.a._compute_type(env, agg_env)
self._type = tarray(self.a.typ.element_type)
class ToStream(IR):
@typecheck_method(a=IR, requires_memory_management_per_element=bool)
def __init__(self, a, requires_memory_management_per_element=False):
super().__init__(a)
self.a = a
self.requires_memory_management_per_element = requires_memory_management_per_element
@typecheck_method(a=IR)
def copy(self, a):
return ToStream(a)
def head_str(self):
return self.requires_memory_management_per_element
def _compute_type(self, env, agg_env):
self.a._compute_type(env, agg_env)
self._type = tstream(self.a.typ.element_type)
class LowerBoundOnOrderedCollection(IR):
@typecheck_method(ordered_collection=IR, elem=IR, on_key=bool)
def __init__(self, ordered_collection, elem, on_key):
super().__init__(ordered_collection, elem)
self.ordered_collection = ordered_collection
self.elem = elem
self.on_key = on_key
@typecheck_method(ordered_collection=IR, elem=IR)
def copy(self, ordered_collection, elem):
return LowerBoundOnOrderedCollection(ordered_collection, elem, self.on_key)
def head_str(self):
return self.on_key
def _compute_type(self, env, agg_env):
self.ordered_collection._compute_type(env, agg_env)
self.elem._compute_type(env, agg_env)
self._type = tint32
class GroupByKey(IR):
@typecheck_method(collection=IR)
def __init__(self, collection):
super().__init__(collection)
self.collection = collection
@typecheck_method(collection=IR)
def copy(self, collection):
return GroupByKey(collection)
def _compute_type(self, env, agg_env):
self.collection._compute_type(env, agg_env)
self._type = tdict(self.collection.typ.element_type.types[0],
tarray(self.collection.typ.element_type.types[1]))
class StreamMap(IR):
@typecheck_method(a=IR, name=str, body=IR)
def __init__(self, a, name, body):
super().__init__(a, body)
self.a = a
self.name = name
self.body = body
@typecheck_method(a=IR, body=IR)
def copy(self, a, body):
return StreamMap(a, self.name, body)
def head_str(self):
return escape_id(self.name)
def _eq(self, other):
return self.name == other.name
@property
def bound_variables(self):
return {self.name} | super().bound_variables
def _compute_type(self, env, agg_env):
self.a._compute_type(env, agg_env)
self.body._compute_type(_env_bind(env, self.bindings(1)), agg_env)
self._type = tstream(self.body.typ)
def renderable_bindings(self, i, default_value=None):
if i == 1:
if default_value is None:
value = self.a.typ.element_type
else:
value = default_value
return {self.name: value}
else:
return {}
class StreamZip(IR):
@typecheck_method(streams=sequenceof(IR), names=sequenceof(str), body=IR, behavior=str,
error_id=nullable(int), stack_trace=nullable(str))
def __init__(self, streams, names, body, behavior, error_id=None, stack_trace=None):
super().__init__(*streams, body)
self.streams = streams
self.names = names
self.body = body
self.behavior = behavior
self._error_id = error_id
self._stack_trace = stack_trace
if error_id is None or stack_trace is None:
self.save_error_info()
@typecheck_method(children=IR)
def copy(self, *children):
return StreamZip(children[:-1], self.names, children[-1], self.behavior, self._error_id, self._stack_trace)
def head_str(self):
return f'{self._error_id} {escape_id(self.behavior)} ({" ".join(map(escape_id, self.names))})'
def _eq(self, other):
return self.names == other.names and self.behavior == other.behavior
@property
def bound_variables(self):
return set(self.names) | super().bound_variables
def _compute_type(self, env, agg_env):
for a in self.streams:
a._compute_type(env, agg_env)
self.body._compute_type(_env_bind(env, self.bindings(len(self.names))), agg_env)
self._type = tstream(self.body.typ)
def renderable_bindings(self, i, default_value=None):
if i == len(self.names):
return {name: default_value if default_value is not None else a.typ.element_type for name, a in zip(self.names, self.streams)}
else:
return {}
class StreamFilter(IR):
@typecheck_method(a=IR, name=str, body=IR)
def __init__(self, a, name, body):
super().__init__(a, body)
self.a = a
self.name = name
self.body = body
@typecheck_method(a=IR, body=IR)
def copy(self, a, body):
return StreamFilter(a, self.name, body)
def head_str(self):
return escape_id(self.name)
def _eq(self, other):
return self.name == other.name
@property
def bound_variables(self):
return {self.name} | super().bound_variables
def _compute_type(self, env, agg_env):
self.a._compute_type(env, agg_env)
self.body._compute_type(_env_bind(env, self.bindings(1)), agg_env)
self._type = self.a.typ
def renderable_bindings(self, i, default_value=None):
if i == 1:
if default_value is None:
value = self.a.typ.element_type
else:
value = default_value
return {self.name: value}
else:
return {}
class StreamFlatMap(IR):
@typecheck_method(a=IR, name=str, body=IR)
def __init__(self, a, name, body):
super().__init__(a, body)
self.a = a
self.name = name
self.body = body
@typecheck_method(a=IR, body=IR)
def copy(self, a, body):
return StreamFlatMap(a, self.name, body)
def head_str(self):
return escape_id(self.name)
def _eq(self, other):
return self.name == other.name
@property
def bound_variables(self):
return {self.name} | super().bound_variables
def _compute_type(self, env, agg_env):
self.a._compute_type(env, agg_env)
self.body._compute_type(_env_bind(env, self.bindings(1)), agg_env)
self._type = tstream(self.body.typ.element_type)
def renderable_bindings(self, i, default_value=None):
if i == 1:
if default_value is None:
value = self.a.typ.element_type
else:
value = default_value
return {self.name: value}
return {}
class StreamFold(IR):
@typecheck_method(a=IR, zero=IR, accum_name=str, value_name=str, body=IR)
def __init__(self, a, zero, accum_name, value_name, body):
super().__init__(a, zero, body)
self.a = a
self.zero = zero
self.accum_name = accum_name
self.value_name = value_name
self.body = body
@typecheck_method(a=IR, zero=IR, body=IR)
def copy(self, a, zero, body):
return StreamFold(a, zero, self.accum_name, self.value_name, body)
def head_str(self):
return f'{escape_id(self.accum_name)} {escape_id(self.value_name)}'
def _eq(self, other):
return other.accum_name == self.accum_name and \
other.value_name == self.value_name
@property
def bound_variables(self):
return {self.accum_name, self.value_name} | super().bound_variables
def _compute_type(self, env, agg_env):
self.a._compute_type(env, agg_env)
self.zero._compute_type(env, agg_env)
self.body._compute_type(_env_bind(env, self.bindings(2)), agg_env)
self._type = self.zero.typ
def renderable_bindings(self, i, default_value=None):
if i == 2:
if default_value is None:
return {self.accum_name: self.zero.typ, self.value_name: self.a.typ.element_type}
else:
return {self.accum_name: default_value, self.value_name: default_value}
else:
return {}
class StreamScan(IR):
@typecheck_method(a=IR, zero=IR, accum_name=str, value_name=str, body=IR)
def __init__(self, a, zero, accum_name, value_name, body):
super().__init__(a, zero, body)
self.a = a
self.zero = zero
self.accum_name = accum_name
self.value_name = value_name
self.body = body
@typecheck_method(a=IR, zero=IR, body=IR)
def copy(self, a, zero, body):
return StreamScan(a, zero, self.accum_name, self.value_name, body)
def head_str(self):
return f'{escape_id(self.accum_name)} {escape_id(self.value_name)}'
def _eq(self, other):
return other.accum_name == self.accum_name and \
other.value_name == self.value_name
@property
def bound_variables(self):
return {self.accum_name, self.value_name} | super().bound_variables
def _compute_type(self, env, agg_env):
self.a._compute_type(env, agg_env)
self.zero._compute_type(env, agg_env)
self.body._compute_type(_env_bind(env, self.bindings(2)), agg_env)
self._type = tstream(self.body.typ)
def renderable_bindings(self, i, default_value=None):
if i == 2:
if default_value is None:
return {self.accum_name: self.zero.typ, self.value_name: self.a.typ.element_type}
else:
return {self.accum_name: default_value, self.value_name: default_value}
else:
return {}
class StreamJoinRightDistinct(IR):
@typecheck_method(left=IR, right=IR, l_key=sequenceof(str), r_key=sequenceof(str), l_name=str, r_name=str, join=IR, join_type=str)
def __init__(self, left, right, l_key, r_key, l_name, r_name, join, join_type):
super().__init__(left, right, join)
self.left = left
self.right = right
self.l_key = l_key
self.r_key = r_key
self.l_name = l_name
self.r_name = r_name
self.join = join
self.join_type = join_type
@typecheck_method(left=IR, right=IR, join=IR)
def copy(self, left, right, join):
return StreamJoinRightDistinct(left, right, self.l_key, self.r_key, self.l_name, self.r_name, join, self.join_type)
def head_str(self):
return '({}) ({}) {} {} {}'.format(
' '.join([escape_id(x) for x in self.l_key]),
' '.join([escape_id(x) for x in self.r_key]),
self.l_name,
self.r_name,
self.join_type)
def _eq(self, other):
return other.l_name == self.l_name and \
other.r_name == self.r_name and \
other.join_type == self.join_type
@property
def bound_variables(self):
return {self.l_name, self.r_name} | super().bound_variables
def renderable_bindings(self, i, default_value=None):
if i == 2:
if default_value is None:
return {self.l_name: self.left.typ.element_type,
self.r_name: self.right.typ.element_type}
else:
return {self.l_name: default_value,
self.r_name: default_value}
else:
return {}
class StreamFor(IR):
@typecheck_method(a=IR, value_name=str, body=IR)
def __init__(self, a, value_name, body):
super().__init__(a, body)
self.a = a
self.value_name = value_name
self.body = body
@typecheck_method(a=IR, body=IR)
def copy(self, a, body):
return StreamFor(a, self.value_name, body)
def head_str(self):
return escape_id(self.value_name)
def _eq(self, other):
return self.value_name == other.value_name
@property
def bound_variables(self):
return {self.value_name} | super().bound_variables
def _compute_type(self, env, agg_env):
self.a._compute_type(env, agg_env)
self.body._compute_type(_env_bind(env, self.bindings(1)), agg_env)
self._type = tvoid
def renderable_bindings(self, i, default_value=None):
if i == 1:
if default_value is None:
value = self.a.typ.element_type
else:
value = default_value
return {self.value_name: value}
else:
return {}
class AggFilter(IR):
@typecheck_method(cond=IR, agg_ir=IR, is_scan=bool)
def __init__(self, cond, agg_ir, is_scan):
super().__init__(cond, agg_ir)
self.cond = cond
self.agg_ir = agg_ir
self.is_scan = is_scan
@typecheck_method(cond=IR, agg_ir=IR)
def copy(self, cond, agg_ir):
return AggFilter(cond, agg_ir, self.is_scan)
def head_str(self):
return str(self.is_scan)
def _eq(self, other):
return self.is_scan == other.is_scan
def _compute_type(self, env, agg_env):
self.cond._compute_type(agg_env, None)
self.agg_ir._compute_type(env, agg_env)
self._type = self.agg_ir.typ
def renderable_uses_agg_context(self, i: int):
return i == 0 and not self.is_scan
def renderable_bindings(self, i, default_value=None):
if i == 1:
return {BaseIR.agg_capability: default_value}
else:
return {}
def renderable_uses_scan_context(self, i: int):
return i == 0 and self.is_scan
@classmethod
def uses_agg_capability(cls) -> bool:
return True
class AggExplode(IR):
@typecheck_method(s=IR, name=str, agg_body=IR, is_scan=bool)
def __init__(self, s, name, agg_body, is_scan):
super().__init__(s, agg_body)
self.name = name
self.s = s
self.agg_body = agg_body
self.is_scan = is_scan
@typecheck_method(s=IR, agg_body=IR)
def copy(self, s, agg_body):
return AggExplode(s, self.name, agg_body, self.is_scan)
def head_str(self):
return f'{escape_id(self.name)} {self.is_scan}'
def _eq(self, other):
return self.name == other.name and self.is_scan == other.is_scan
@property
def bound_variables(self):
return {self.name} | super().bound_variables
def _compute_type(self, env, agg_env):
self.s._compute_type(agg_env, None)
self.agg_body._compute_type(env, _env_bind(agg_env, self.agg_bindings(1)))
self._type = self.agg_body.typ
def renderable_bindings(self, i, default_value=None):
if i == 1:
return {BaseIR.agg_capability: default_value}
else:
return {}
def renderable_agg_bindings(self, i, default_value=None):
if i == 1:
if default_value is None:
value = self.s.typ.element_type
else:
value = default_value
return {self.name: value}
else:
return {}
def renderable_scan_bindings(self, i, default_value=None):
return self.renderable_agg_bindings(i, default_value)
def renderable_uses_agg_context(self, i: int):
return i == 0 and not self.is_scan
def renderable_uses_scan_context(self, i: int):
return i == 0 and self.is_scan
@classmethod
def uses_agg_capability(cls) -> bool:
return True
class AggGroupBy(IR):
@typecheck_method(key=IR, agg_ir=IR, is_scan=bool)
def __init__(self, key, agg_ir, is_scan):
super().__init__(key, agg_ir)
self.key = key
self.agg_ir = agg_ir
self.is_scan = is_scan
@typecheck_method(key=IR, agg_ir=IR)
def copy(self, key, agg_ir):
return AggGroupBy(key, agg_ir, self.is_scan)
def head_str(self):
return str(self.is_scan)
def _eq(self, other):
return self.is_scan == other.is_scan
def _compute_type(self, env, agg_env):
self.key._compute_type(agg_env, None)
self.agg_ir._compute_type(env, agg_env)
self._type = tdict(self.key.typ, self.agg_ir.typ)
def renderable_bindings(self, i, default_value=None):
if i == 1:
return {BaseIR.agg_capability: default_value}
else:
return {}
def renderable_uses_agg_context(self, i: int):
return i == 0 and not self.is_scan
def renderable_uses_scan_context(self, i: int):
return i == 0 and self.is_scan
@classmethod
def uses_agg_capability(cls) -> bool:
return True
class AggArrayPerElement(IR):
@typecheck_method(array=IR, element_name=str, index_name=str, agg_ir=IR, is_scan=bool)
def __init__(self, array, element_name, index_name, agg_ir, is_scan):
super().__init__(array, agg_ir)
self.array = array
self.element_name = element_name
self.index_name = index_name
self.agg_ir = agg_ir
self.is_scan = is_scan
@typecheck_method(array=IR, agg_ir=IR)
def copy(self, array, agg_ir):
return AggArrayPerElement(array, self.element_name, self.index_name, agg_ir, self.is_scan)
def head_str(self):
return f'{escape_id(self.element_name)} {escape_id(self.index_name)} {self.is_scan} False'
def _eq(self, other):
return self.element_name == other.element_name and self.index_name == other.index_name and self.is_scan == other.is_scan
def _compute_type(self, env, agg_env):
self.array._compute_type(agg_env, None)
self.agg_ir._compute_type(_env_bind(env, self.bindings(1)),
_env_bind(agg_env, self.agg_bindings(1)))
self._type = tarray(self.agg_ir.typ)
@property
def bound_variables(self):
return {self.element_name, self.index_name} | super().bound_variables
def renderable_uses_agg_context(self, i: int):
return i == 0 and not self.is_scan
def renderable_uses_scan_context(self, i: int):
return i == 0 and self.is_scan
@classmethod
def uses_agg_capability(cls) -> bool:
return True
def renderable_bindings(self, i, default_value=None):
if i == 1:
value = tint32 if default_value is None else default_value
return {self.index_name: value, BaseIR.agg_capability: default_value}
else:
return {}
def renderable_agg_bindings(self, i, default_value=None):
if i == 1:
if default_value is None:
value = self.array.typ.element_type
else:
value = default_value
return {self.element_name: value}
else:
return {}
def renderable_scan_bindings(self, i, default_value=None):
return self.renderable_agg_bindings(i, default_value)
def _register(registry, name, f):
registry[name].append(f)
_aggregator_registry = defaultdict(list)
def register_aggregator(name, init_params, seq_params, ret_type):
_register(_aggregator_registry, name, (init_params, seq_params, ret_type))
def lookup_aggregator_return_type(name, init_args, seq_args):
if name in _aggregator_registry:
fns = _aggregator_registry[name]
for f in fns:
(init_params, seq_params, ret_type) = f
for p in init_params:
p.clear()
for p in seq_params:
p.clear()
if (all(p.unify(a) for p, a in zip(init_params, init_args))
and all(p.unify(a) for p, a in zip(seq_params, seq_args))):
return ret_type.subst()
raise KeyError(f'aggregator {name}({ ",".join([str(t) for t in seq_args]) }) not found')
class BaseApplyAggOp(IR):
@typecheck_method(agg_op=str,
init_op_args=sequenceof(IR),
seq_op_args=sequenceof(IR))
def __init__(self, agg_op, init_op_args, seq_op_args):
super().__init__(*init_op_args, *seq_op_args)
self.agg_op = agg_op
self.init_op_args = init_op_args
self.seq_op_args = seq_op_args
def copy(self, *args):
new_instance = self.__class__
n_seq_op_args = len(self.seq_op_args)
init_op_args = args[:len(self.init_op_args)]
seq_op_args = args[-n_seq_op_args:]
return new_instance(self.agg_op, init_op_args, seq_op_args)
def head_str(self):
return f'{self.agg_op}'
# Overloaded to add space after 'agg_op' even if there are no children.
def render_head(self, r):
return f'({self._ir_name()} {self.agg_op} '
def render_children(self, r):
return [
ParensRenderer(self.init_op_args),
ParensRenderer(self.seq_op_args)
]
@property
def aggregations(self):
assert all(map(lambda c: len(c.aggregations) == 0, self.children))
return [self]
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.agg_op == self.agg_op and \
other.init_op_args == self.init_op_args and \
other.seq_op_args == self.seq_op_args
def __hash__(self):
return hash(tuple([self.agg_op,
tuple(self.init_op_args),
tuple(self.seq_op_args)]))
def _compute_type(self, env, agg_env):
for a in self.init_op_args:
a._compute_type(env, agg_env)
for a in self.seq_op_args:
a._compute_type(agg_env, None)
self._type = lookup_aggregator_return_type(
self.agg_op,
[a.typ for a in self.init_op_args],
[a.typ for a in self.seq_op_args])
def renderable_new_block(self, i: int) -> bool:
return i == 0
def renderable_idx_of_child(self, i: int) -> int:
if i < len(self.init_op_args):
return 0
return 1
@classmethod
def uses_agg_capability(cls) -> bool:
return True
class ApplyAggOp(BaseApplyAggOp):
@typecheck_method(agg_op=str,
init_op_args=sequenceof(IR),
seq_op_args=sequenceof(IR))
def __init__(self, agg_op, init_op_args, seq_op_args):
super().__init__(agg_op, init_op_args, seq_op_args)
def renderable_uses_agg_context(self, i: int):
return i == 1
class ApplyScanOp(BaseApplyAggOp):
@typecheck_method(agg_op=str,
init_op_args=sequenceof(IR),
seq_op_args=sequenceof(IR))
def __init__(self, agg_op, init_op_args, seq_op_args):
super().__init__(agg_op, init_op_args, seq_op_args)
def renderable_uses_scan_context(self, i: int):
return i == 1
class AggFold(IR):
@typecheck_method(zero=IR, seq_op=IR, comb_op=IR, accum_name=str, other_accum_name=str, is_scan=bool)
def __init__(self, zero, seq_op, comb_op, accum_name, other_accum_name, is_scan):
super().__init__(zero, seq_op, comb_op)
self.zero = zero
self.seq_op = seq_op
self.comb_op = comb_op
self.accum_name = accum_name
self.other_accum_name = other_accum_name
self.is_scan = is_scan
if self.comb_op.free_vars - {accum_name, other_accum_name} != set([]):
raise HailUserError("The comb_op function of fold cannot reference any fields on the Table or MatrixTable")
def copy(self, zero, seq_op, comb_op):
return AggFold(zero, seq_op, comb_op, self.accum_name, self.other_accum_name, self.is_scan)
def head_str(self):
return f"{self.accum_name} {self.other_accum_name} {self.is_scan}"
def _compute_type(self, env, agg_env):
self.zero._compute_type(env, agg_env)
self.seq_op._compute_type(_env_bind(agg_env, self.bindings(1)), None)
self.comb_op._compute_type(self.bindings(2), None)
assert self.zero._type == self.seq_op._type
assert self.zero._type == self.comb_op._type
self._type = self.zero._type
def renderable_bindings(self, i: int, default_value=None):
dict_so_far = {}
if i == 1 or i == 2:
if default_value is None:
dict_so_far[self.accum_name] = self.zero.typ
else:
dict_so_far[self.accum_name] = default_value
if i == 2:
if default_value is None:
dict_so_far[self.other_accum_name] = self.zero.typ
else:
dict_so_far[self.other_accum_name] = default_value
return dict_so_far
def renderable_new_block(self, i: int) -> bool:
return i > 0
class Begin(IR):
@typecheck_method(xs=sequenceof(IR))
def __init__(self, xs):
super().__init__(*xs)
self.xs = xs
def copy(self, *xs):
return Begin(xs)
def _compute_type(self, env, agg_env):
for x in self.xs:
x._compute_type(env, agg_env)
self._type = tvoid
class MakeStruct(IR):
@typecheck_method(fields=sequenceof(sized_tupleof(str, IR)))
def __init__(self, fields):
super().__init__(*[ir for (n, ir) in fields])
self.fields = fields
def copy(self, *irs):
assert len(irs) == len(self.fields)
return MakeStruct([(n, ir) for (n, _), ir in zip(self.fields, irs)])
def render_children(self, r):
return [InsertFields.IFRenderField(escape_id(f), x) for f, x in self.fields]
def __eq__(self, other):
return isinstance(other, MakeStruct) \
and other.fields == self.fields
def __hash__(self):
return hash(tuple(self.fields))
def _compute_type(self, env, agg_env):
for f, x in self.fields:
x._compute_type(env, agg_env)
self._type = tstruct(**{f: x.typ for f, x in self.fields})
class SelectFields(IR):
@typecheck_method(old=IR, fields=sequenceof(str))
def __init__(self, old, fields):
super().__init__(old)
self.old = old
self.fields = fields
@typecheck_method(old=IR)
def copy(self, old):
return SelectFields(old, self.fields)
def head_str(self):
return '({})'.format(' '.join(map(escape_id, self.fields)))
def _eq(self, other):
return self.fields == other.fields
def _compute_type(self, env, agg_env):
self.old._compute_type(env, agg_env)
self._type = self.old.typ._select_fields(self.fields)
class InsertFields(IR):
class IFRenderField(Renderable):
def __init__(self, field, child):
super().__init__()
self.field = field
self.child = child
def render_head(self, r: Renderer):
return f'({self.field} '
def render_tail(self, r: Renderer):
return ')'
def render_children(self, r: Renderer):
return [self.child]
@staticmethod
@typecheck(old=IR, fields=sequenceof(sized_tupleof(str, IR)), field_order=nullable(sequenceof(str)))
def construct_with_deduplication(old, fields, field_order):
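        # Count reads of each non-Ref struct via GetField; any struct read more
        # than once is bound to a fresh uid with a Let so it is evaluated only
        # once, and the corresponding GetFields are rewritten to read from that
        # uid instead of re-evaluating the struct.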
dd = defaultdict(int)
for k, v in fields:
if isinstance(v, GetField) and not isinstance(v.o, Ref):
dd[v.o] += 1
replacements = {}
lets = []
for k, v in dd.items():
if v > 1:
uid = Env.get_uid()
lets.append((uid, k))
replacements[k] = uid
insert_irs = []
for k, v in fields:
if isinstance(v, GetField) and v.o in replacements:
insert_irs.append((k, GetField(Ref(replacements[v.o]), v.name)))
else:
insert_irs.append((k, v))
r = InsertFields(old, insert_irs, field_order)
for uid, value in lets:
r = Let(uid, value, r)
return r
@typecheck_method(old=IR, fields=sequenceof(sized_tupleof(str, IR)), field_order=nullable(sequenceof(str)))
def __init__(self, old, fields, field_order):
super().__init__(old, *[ir for (f, ir) in fields])
self.old = old
self.fields = fields
self.field_order = field_order
def copy(self, *args):
assert len(args) == len(self.fields) + 1
return InsertFields(args[0], [(n, ir) for (n, _), ir in zip(self.fields, args[1:])], self.field_order)
def render_children(self, r):
return [
self.old,
hail.ir.RenderableStr(
'None' if self.field_order is None else parsable_strings(self.field_order)),
*(InsertFields.IFRenderField(escape_id(f), x) for f, x in self.fields)
]
def __eq__(self, other):
return isinstance(other, InsertFields) and \
other.old == self.old and \
other.fields == self.fields and \
other.field_order == self.field_order
def __hash__(self):
return hash((self.old, tuple(self.fields), tuple(self.field_order) if self.field_order else None))
def _compute_type(self, env, agg_env):
self.old._compute_type(env, agg_env)
for f, x in self.fields:
x._compute_type(env, agg_env)
self._type = self.old.typ._insert_fields(**{f: x.typ for f, x in self.fields})
if self.field_order:
self._type = tstruct(**{f: self._type[f] for f in self.field_order})
class GetField(IR):
@typecheck_method(o=IR, name=str)
def __init__(self, o, name):
super().__init__(o)
self.o = o
self.name = name
@typecheck_method(o=IR)
def copy(self, o):
return GetField(o, self.name)
def head_str(self):
return escape_id(self.name)
def _eq(self, other):
return self.name == other.name
@property
def is_nested_field(self):
return self.o.is_nested_field
def _compute_type(self, env, agg_env):
self.o._compute_type(env, agg_env)
self._type = self.o.typ[self.name]
class MakeTuple(IR):
@typecheck_method(elements=sequenceof(IR))
def __init__(self, elements):
super().__init__(*elements)
self.elements = elements
def copy(self, *args):
return MakeTuple(args)
def head_str(self):
return f'({" ".join([str(i) for i in range(len(self.elements))])})'
def _compute_type(self, env, agg_env):
for x in self.elements:
x._compute_type(env, agg_env)
self._type = ttuple(*[x.typ for x in self.elements])
class GetTupleElement(IR):
@typecheck_method(o=IR, idx=int)
def __init__(self, o, idx):
super().__init__(o)
self.o = o
self.idx = idx
@typecheck_method(o=IR)
def copy(self, o):
return GetTupleElement(o, self.idx)
def head_str(self):
return self.idx
def _eq(self, other):
return self.idx == other.idx
def _compute_type(self, env, agg_env):
self.o._compute_type(env, agg_env)
self._type = self.o.typ.types[self.idx]
class Die(IR):
@typecheck_method(message=IR, typ=hail_type, error_id=nullable(int), stack_trace=nullable(str))
def __init__(self, message, typ, error_id=None, stack_trace=None):
super().__init__(message)
self.message = message
self._typ = typ
self._error_id = error_id
self._stack_trace = stack_trace
if error_id is None or stack_trace is None:
self.save_error_info()
@property
def typ(self):
return self._typ
def copy(self, message):
return Die(message, self._typ, self._error_id, self._stack_trace)
def head_str(self):
return f'{self._typ._parsable_string()} {self._error_id}'
def _eq(self, other):
return other._typ == self._typ
def _compute_type(self, env, agg_env):
self._type = self._typ
@staticmethod
def is_effectful() -> bool:
return True
class ConsoleLog(IR):
@typecheck_method(message=IR, result=IR)
def __init__(self, message, result):
super().__init__(message, result)
self.message = message
self.result = result
def _compute_type(self, env, agg_env):
self.message._compute_type(env, agg_env)
self.result._compute_type(env, agg_env)
self._type = self.result._type
def copy(self, message, result):
return ConsoleLog(message, result)
@staticmethod
def is_effectful() -> bool:
return True
_function_registry = defaultdict(list)
_seeded_function_registry = defaultdict(list)
_udf_registry = dict()
def clear_session_functions():
global _udf_registry
for f in _udf_registry.values():
remove_function(f._name, f._param_types, f._ret_type, f._type_args)
_udf_registry = dict()
def remove_function(name, param_types, ret_type, type_args=()):
f = (param_types, ret_type, type_args)
bindings = _function_registry[name]
bindings = [b for b in bindings if b != f]
if not bindings:
del _function_registry[name]
else:
_function_registry[name] = bindings
def register_function(name, param_types, ret_type, type_args=()):
_register(_function_registry, name, (param_types, ret_type, type_args))
def register_seeded_function(name, param_types, ret_type):
_register(_seeded_function_registry, name, (param_types, ret_type))
def udf(*param_types):
uid = Env.get_uid()
@decorator.decorator
def wrapper(__original_func, *args, **kwargs):
registry = hail.ir.ir._udf_registry
if uid in registry:
f = registry[uid]
else:
f = hail.experimental.define_function(__original_func, *param_types, _name=uid)
registry[uid] = f
return f(*args, **kwargs)
return wrapper
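# Illustrative note: a function decorated with @udf(t1, t2, ...) is compiled into
# a Hail-side function at most once per session (cached in _udf_registry under a
# fresh uid) and the cached definition is reused on later calls; the arguments to
# @udf are Hail types, e.g. hl.tint32.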
class Apply(IR):
@typecheck_method(function=str, return_type=hail_type, args=IR,
error_id=nullable(int), stack_trace=nullable(str), type_args=tupleof(hail_type))
def __init__(self, function, return_type, *args, type_args=(), error_id=None, stack_trace=None,):
super().__init__(*args)
self.function = function
self.return_type = return_type
self.type_args = type_args
self.args = args
self._error_id = error_id
self._stack_trace = stack_trace
if error_id is None or stack_trace is None:
self.save_error_info()
def copy(self, *args):
return Apply(self.function, self.return_type, *args, type_args=self.type_args, error_id=self._error_id, stack_trace=self._stack_trace,)
def head_str(self):
type_args = "(" + " ".join([a._parsable_string() for a in self.type_args]) + ")"
return f'{self._error_id} {escape_id(self.function)} {type_args} {self.return_type._parsable_string()}'
def _eq(self, other):
return other.function == self.function and \
other.type_args == self.type_args and \
other.return_type == self.return_type
def _compute_type(self, env, agg_env):
for arg in self.args:
arg._compute_type(env, agg_env)
self._type = self.return_type
class ApplySeeded(IR):
@typecheck_method(function=str, seed=int, return_type=hail_type, args=IR)
def __init__(self, function, seed, return_type, *args):
if hail.current_backend().requires_lowering:
warning("Seeded randomness is currently unreliable on the service. "
"You may observe some unexpected behavior. Don't use for real work yet.")
super().__init__(*args)
self.function = function
self.args = args
self.seed = seed
self.return_type = return_type
def copy(self, *args):
return ApplySeeded(self.function, self.seed, self.return_type, *args)
def head_str(self):
return f'{escape_id(self.function)} {self.seed} {self.return_type._parsable_string()}'
def _eq(self, other):
return other.function == self.function and \
other.seed == self.seed and \
other.return_type == self.return_type
def _compute_type(self, env, agg_env):
for arg in self.args:
arg._compute_type(env, agg_env)
self._type = self.return_type
@staticmethod
def is_effectful() -> bool:
return True
class TableCount(IR):
@typecheck_method(child=TableIR)
def __init__(self, child):
super().__init__(child)
self.child = child
@typecheck_method(child=TableIR)
def copy(self, child):
return TableCount(child)
def _compute_type(self, env, agg_env):
self.child._compute_type()
self._type = tint64
class TableGetGlobals(IR):
@typecheck_method(child=TableIR)
def __init__(self, child):
super().__init__(child)
self.child = child
@typecheck_method(child=TableIR)
def copy(self, child):
return TableGetGlobals(child)
def _compute_type(self, env, agg_env):
self.child._compute_type()
self._type = self.child.typ.global_type
class TableCollect(IR):
@typecheck_method(child=TableIR)
def __init__(self, child):
super().__init__(child)
self.child = child
@typecheck_method(child=TableIR)
def copy(self, child):
return TableCollect(child)
def _compute_type(self, env, agg_env):
self.child._compute_type()
self._type = tstruct(**{'rows': tarray(self.child.typ.row_type),
'global': self.child.typ.global_type})
class TableAggregate(IR):
@typecheck_method(child=TableIR, query=IR)
def __init__(self, child, query):
super().__init__(child, query)
self.child = child
self.query = query
@typecheck_method(child=TableIR, query=IR)
def copy(self, child, query):
return TableAggregate(child, query)
def _compute_type(self, env, agg_env):
self.query._compute_type(self.child.typ.global_env(), self.child.typ.row_env())
self._type = self.query.typ
def renderable_new_block(self, i: int):
return i == 1
def renderable_bindings(self, i, default_value=None):
if i == 1:
env = self.child.typ.global_env(default_value)
env[BaseIR.agg_capability] = default_value
return env
else:
return {}
def renderable_agg_bindings(self, i, default_value=None):
return self.child.typ.row_env(default_value) if i == 1 else {}
class MatrixCount(IR):
@typecheck_method(child=MatrixIR)
def __init__(self, child):
super().__init__(child)
self.child = child
@typecheck_method(child=MatrixIR)
def copy(self, child):
        return MatrixCount(child)
def _compute_type(self, env, agg_env):
self.child._compute_type()
self._type = ttuple(tint64, tint32)
class MatrixAggregate(IR):
@typecheck_method(child=MatrixIR, query=IR)
def __init__(self, child, query):
super().__init__(child, query)
self.child = child
self.query = query
@typecheck_method(child=MatrixIR, query=IR)
def copy(self, child, query):
return MatrixAggregate(child, query)
def _compute_type(self, env, agg_env):
self.query._compute_type(self.child.typ.global_env(), self.child.typ.entry_env())
self._type = self.query.typ
def renderable_new_block(self, i: int):
return i == 1
def renderable_bindings(self, i, default_value=None):
if i == 1:
env = self.child.typ.global_env(default_value)
env[BaseIR.agg_capability] = default_value
return env
else:
return {}
def renderable_agg_bindings(self, i, default_value=None):
return self.child.typ.entry_env(default_value) if i == 1 else {}
class TableWrite(IR):
@typecheck_method(child=TableIR, writer=TableWriter)
def __init__(self, child, writer):
super().__init__(child)
self.child = child
self.writer = writer
@typecheck_method(child=TableIR)
def copy(self, child):
return TableWrite(child, self.writer)
def head_str(self):
return f'"{self.writer.render()}"'
def _eq(self, other):
return other.writer == self.writer
def _compute_type(self, env, agg_env):
self.child._compute_type()
self._type = tvoid
@staticmethod
def is_effectful() -> bool:
return True
class MatrixWrite(IR):
@typecheck_method(child=MatrixIR, matrix_writer=MatrixWriter)
def __init__(self, child, matrix_writer):
super().__init__(child)
self.child = child
self.matrix_writer = matrix_writer
@typecheck_method(child=MatrixIR)
def copy(self, child):
return MatrixWrite(child, self.matrix_writer)
def head_str(self):
return f'"{self.matrix_writer.render()}"'
def _eq(self, other):
return other.matrix_writer == self.matrix_writer
def _compute_type(self, env, agg_env):
self.child._compute_type()
self._type = tvoid
@staticmethod
def is_effectful() -> bool:
return True
class MatrixMultiWrite(IR):
@typecheck_method(children=sequenceof(MatrixIR), writer=MatrixNativeMultiWriter)
def __init__(self, children, writer):
super().__init__(*children)
self.writer = writer
def copy(self, *children):
return MatrixMultiWrite(children, self.writer)
def head_str(self):
return f'"{self.writer.render()}"'
def _eq(self, other):
return other.writer == self.writer
def _compute_type(self, env, agg_env):
for x in self.children:
x._compute_type()
self._type = tvoid
@staticmethod
def is_effectful() -> bool:
return True
class BlockMatrixCollect(IR):
@typecheck_method(child=BlockMatrixIR)
def __init__(self, child):
super().__init__(child)
self.child = child
def copy(self, child):
return BlockMatrixCollect(self.child)
def _eq(self, other):
return isinstance(other, BlockMatrixCollect) and self.child == other.child
def _compute_type(self, env, agg_env):
self.child._compute_type()
self._type = tndarray(tfloat64, 2)
class BlockMatrixWrite(IR):
@typecheck_method(child=BlockMatrixIR, writer=BlockMatrixWriter)
def __init__(self, child, writer):
super().__init__(child)
self.child = child
self.writer = writer
def copy(self, child):
return BlockMatrixWrite(child, self.writer)
def head_str(self):
return f'"{self.writer.render()}"'
def _eq(self, other):
return self.writer == other.writer
def _compute_type(self, env, agg_env):
self.child._compute_type()
self._type = self.writer._type()
@staticmethod
def is_effectful() -> bool:
return True
class BlockMatrixMultiWrite(IR):
@typecheck_method(block_matrices=sequenceof(BlockMatrixIR), writer=BlockMatrixMultiWriter)
def __init__(self, block_matrices, writer):
super().__init__(*block_matrices)
self.block_matrices = block_matrices
self.writer = writer
def copy(self, *block_matrices):
return BlockMatrixMultiWrite(block_matrices, self.writer)
def head_str(self):
return f'"{self.writer.render()}"'
def _eq(self, other):
return self.writer == other.writer
def _compute_type(self, env, agg_env):
for x in self.block_matrices:
x._compute_type()
self._type = tvoid
@staticmethod
def is_effectful() -> bool:
return True
class TableToValueApply(IR):
def __init__(self, child, config):
super().__init__(child)
self.child = child
self.config = config
@typecheck_method(child=TableIR)
def copy(self, child):
return TableToValueApply(child, self.config)
def head_str(self):
return dump_json(self.config)
def _eq(self, other):
return other.config == self.config
def _compute_type(self, env, agg_env):
name = self.config['name']
if name == 'ForceCountTable':
self._type = tint64
elif name == 'TableCalculateNewPartitions':
self._type = tarray(tinterval(self.child.typ.key_type))
else:
assert name == 'NPartitionsTable', name
self._type = tint32
class MatrixToValueApply(IR):
def __init__(self, child, config):
super().__init__(child)
self.child = child
self.config = config
@typecheck_method(child=MatrixIR)
def copy(self, child):
return MatrixToValueApply(child, self.config)
def head_str(self):
return dump_json(self.config)
def _eq(self, other):
return other.config == self.config
def _compute_type(self, env, agg_env):
name = self.config['name']
if name == 'ForceCountMatrixTable':
self._type = tint64
elif name == 'NPartitionsMatrixTable':
self._type = tint32
elif name == 'MatrixExportEntriesByCol':
self._type = tvoid
else:
assert name == 'MatrixWriteBlockMatrix', name
self._type = tvoid
class BlockMatrixToValueApply(IR):
def __init__(self, child, config):
super().__init__(child)
self.child = child
self.config = config
@typecheck_method(child=BlockMatrixIR)
def copy(self, child):
new_instance = self.__class__
return new_instance(child, self.config)
def head_str(self):
return dump_json(self.config)
def _eq(self, other):
return other.config == self.config
def _compute_type(self, env, agg_env):
assert self.config['name'] == 'GetElement'
self._type = tfloat64
class Literal(IR):
@typecheck_method(typ=hail_type,
value=anytype)
def __init__(self, typ, value):
super(Literal, self).__init__()
self._typ: HailType = typ
self.value = value
def copy(self):
return Literal(self._typ, self.value)
def head_str(self):
return f'{self._typ._parsable_string()} {dump_json(self._typ._convert_to_json_na(self.value))}'
def _eq(self, other):
return other._typ == self._typ and \
other.value == self.value
def _compute_type(self, env, agg_env):
self._type = self._typ
class LiftMeOut(IR):
@typecheck_method(child=IR)
def __init__(self, child):
super().__init__(child)
self.child = child
def copy(self, child):
return LiftMeOut(child)
def _compute_type(self, env, agg_env):
self.child._compute_type(env, agg_env)
self._type = self.child.typ
class Join(IR):
_idx = 0
@typecheck_method(virtual_ir=IR,
temp_vars=sequenceof(str),
join_exprs=sequenceof(anytype),
join_func=func_spec(1, anytype))
def __init__(self, virtual_ir, temp_vars, join_exprs, join_func):
super(Join, self).__init__(virtual_ir)
self.virtual_ir = virtual_ir
self.temp_vars = temp_vars
self.join_exprs = join_exprs
self.join_func = join_func
self.idx = Join._idx
Join._idx += 1
def copy(self, virtual_ir):
        # FIXME: This is pretty hacky; Joins should probably be tracked on Expression?
new_instance = self.__class__
new_instance = new_instance(virtual_ir,
self.temp_vars,
self.join_exprs,
self.join_func)
new_instance.idx = self.idx
return new_instance
def search(self, criteria):
matches = []
for e in self.join_exprs:
matches += e._ir.search(criteria)
matches += super(Join, self).search(criteria)
return matches
def render_head(self, r):
return self.virtual_ir.render_head(r)
def render_tail(self, r):
return self.virtual_ir.render_tail(r)
def render_children(self, r):
return self.virtual_ir.render_children(r)
def _compute_type(self, env, agg_env):
self.virtual_ir._compute_type(env, agg_env)
self._type = self.virtual_ir._type
class JavaIR(IR):
def __init__(self, jir):
super(JavaIR, self).__init__()
self._jir = jir
def copy(self):
return JavaIR(self._jir)
def render_head(self, r):
return f'(JavaIR{r.add_jir(self._jir)}'
def _eq(self, other):
return self._jir == other._jir
def _compute_type(self, env, agg_env):
self._type = dtype(self._jir.typ().toString())
def subst(ir, env, agg_env):
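    # Substitute free Refs by name: `env` maps names to replacement IRs in the
    # value context and `agg_env` in the aggregation context. Binding nodes
    # (Let, StreamMap, StreamFold, ...) delete their bound names from the
    # environment so shadowed references are left untouched, and aggregation
    # nodes switch to `agg_env` for their aggregated children.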
def _subst(ir, env2=None, agg_env2=None):
return subst(ir, env2 if env2 else env, agg_env2 if agg_env2 else agg_env)
def delete(env, name):
new_env = copy.deepcopy(env)
if name in new_env:
del new_env[name]
return new_env
if isinstance(ir, Ref):
return env.get(ir.name, ir)
elif isinstance(ir, Let):
return Let(ir.name,
_subst(ir.value),
_subst(ir.body, delete(env, ir.name)))
elif isinstance(ir, AggLet):
return AggLet(ir.name,
_subst(ir.value, agg_env, {}),
_subst(ir.body, delete(env, ir.name)),
ir.is_scan)
elif isinstance(ir, StreamMap):
return StreamMap(_subst(ir.a),
ir.name,
_subst(ir.body, delete(env, ir.name)))
elif isinstance(ir, StreamFilter):
return StreamFilter(_subst(ir.a),
ir.name,
_subst(ir.body, delete(env, ir.name)))
elif isinstance(ir, StreamFlatMap):
return StreamFlatMap(_subst(ir.a),
ir.name,
_subst(ir.body, delete(env, ir.name)))
elif isinstance(ir, StreamFold):
return StreamFold(_subst(ir.a),
_subst(ir.zero),
ir.accum_name,
ir.value_name,
_subst(ir.body, delete(delete(env, ir.accum_name), ir.value_name)))
elif isinstance(ir, StreamScan):
return StreamScan(_subst(ir.a),
_subst(ir.zero),
ir.accum_name,
ir.value_name,
_subst(ir.body, delete(delete(env, ir.accum_name), ir.value_name)))
elif isinstance(ir, StreamFor):
return StreamFor(_subst(ir.a),
ir.value_name,
_subst(ir.body, delete(env, ir.value_name)))
elif isinstance(ir, AggFilter):
return AggFilter(_subst(ir.cond, agg_env),
_subst(ir.agg_ir, agg_env),
ir.is_scan)
elif isinstance(ir, AggExplode):
return AggExplode(_subst(ir.s, agg_env),
ir.name,
_subst(ir.agg_body, delete(agg_env, ir.name), delete(agg_env, ir.name)),
ir.is_scan)
elif isinstance(ir, AggGroupBy):
return AggGroupBy(_subst(ir.key, agg_env),
_subst(ir.agg_ir, agg_env),
ir.is_scan)
elif isinstance(ir, ApplyAggOp):
subst_init_op_args = [x.map_ir(lambda x: _subst(x)) for x in ir.init_op_args]
subst_seq_op_args = [subst(x, agg_env, {}) for x in ir.seq_op_args]
return ApplyAggOp(ir.agg_op,
subst_init_op_args,
subst_seq_op_args)
elif isinstance(ir, AggFold):
subst_seq_op = subst(ir.seq_op, agg_env, {})
return AggFold(ir.zero, subst_seq_op, ir.comb_op, ir.accum_name, ir.other_accum_name, ir.is_scan)
elif isinstance(ir, AggArrayPerElement):
return AggArrayPerElement(_subst(ir.array, agg_env),
ir.element_name,
ir.index_name,
_subst(ir.agg_ir, delete(env, ir.index_name),
delete(agg_env, ir.element_name)),
ir.is_scan)
else:
assert isinstance(ir, IR)
return ir.map_ir(lambda x: _subst(x))
```
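The aggregator registry above (`register_aggregator` / `lookup_aggregator_return_type`) resolves a return type by scanning the signatures registered under a name and unifying their parameter types against the argument types. Below is a minimal, self-contained sketch of that lookup shape; the types are plain strings rather than Hail type objects, and unification is reduced to equality.
```python
from collections import defaultdict

_registry = defaultdict(list)

def register(name, param_types, ret_type):
    _registry[name].append((param_types, ret_type))

def lookup_return_type(name, arg_types):
    # first registered signature whose parameters match the arguments wins
    for param_types, ret_type in _registry.get(name, []):
        if len(param_types) == len(arg_types) and all(p == a for p, a in zip(param_types, arg_types)):
            return ret_type
    raise KeyError(f'{name}({", ".join(arg_types)}) not found')

register('Sum', ['int64'], 'int64')
register('Sum', ['float64'], 'float64')
assert lookup_return_type('Sum', ['float64']) == 'float64'
```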
#### File: hail/methods/import_lines_helpers.py
```python
from typing import List, Optional
import hail as hl
from hail.utils.misc import hl_plural, plural
def split_lines(
row: hl.StructExpression,
fields: List[str],
*,
delimiter: str,
missing: str,
quote: str
) -> hl.ArrayExpression:
split_array = row.text._split_line(delimiter, missing=missing, quote=quote, regex=len(delimiter) > 1)
return (
hl.case()
.when(hl.len(split_array) == len(fields), split_array)
.or_error(
hl.format(
f'''error in number of fields found: in file %s
Expected {len(fields)} {plural("field", len(fields))}, found %d %s on line:
%s''',
row.file, hl.len(split_array), hl_plural("field", hl.len(split_array)), row.text
)
)
)
def match_comment(comment: str, line: hl.StringExpression) -> hl.Expression:
if len(comment) == 1:
return line.startswith(comment)
return line.matches(comment, True)
def should_remove_line(
line: hl.StringExpression,
*,
filter: str,
comment: List[str],
skip_blank_lines: bool
) -> Optional[hl.BooleanExpression]:
condition = None
if filter is not None:
condition = line.matches(filter)
if len(comment) > 0:
if condition is None:
condition = hl.bool(False)
for mark in comment:
condition = condition | match_comment(mark, line)
if skip_blank_lines:
if condition is None:
condition = hl.bool(False)
condition = condition | (hl.len(line) == 0)
return condition
```
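As a rough plain-Python analog of `should_remove_line` above (operating on an ordinary `str` instead of a Hail `StringExpression`, and returning a plain bool rather than an optional expression), the filter/comment/blank-line composition looks like this; the regex handling only approximates Hail's `matches`.
```python
import re
from typing import List, Optional

def should_remove_line_py(line: str, *, filter: Optional[str],
                          comment: List[str], skip_blank_lines: bool) -> bool:
    remove = False
    if filter is not None:
        remove = remove or re.search(filter, line) is not None
    for mark in comment:
        if len(mark) == 1:
            remove = remove or line.startswith(mark)
        else:
            remove = remove or re.fullmatch(mark, line) is not None
    if skip_blank_lines:
        remove = remove or len(line) == 0
    return remove

assert should_remove_line_py('# header', filter=None, comment=['#'], skip_blank_lines=True)
assert not should_remove_line_py('a\tb\tc', filter=None, comment=['#'], skip_blank_lines=True)
```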
#### File: hail/stats/linear_mixed_model.py
```python
class LinearMixedModel(object):
r"""Class representing a linear mixed model.
.. warning::
This functionality is no longer implemented/supported as of Hail 0.2.94.
"""
def __init__(self, py, px, s, y=None, x=None, p_path=None):
raise NotImplementedError("LinearMixedModel is no longer implemented/supported as of Hail 0.2.94")
```
#### File: hailtop/auth/auth.py
```python
from typing import Optional, Dict, Tuple
import os
import aiohttp
from hailtop.config import get_deploy_config, DeployConfig
from hailtop.utils import async_to_blocking, request_retry_transient_errors
from hailtop import httpx
from .tokens import get_tokens
def namespace_auth_headers(deploy_config: DeployConfig,
ns: str,
authorize_target: bool = True,
*,
token_file: Optional[str] = None
) -> Dict[str, str]:
headers = {}
if authorize_target:
headers['Authorization'] = f'Bearer {get_tokens(token_file).namespace_token_or_error(ns)}'
if deploy_config.location() == 'external' and ns != 'default':
headers['X-Hail-Internal-Authorization'] = f'Bearer {get_tokens(token_file).namespace_token_or_error("default")}'
return headers
def service_auth_headers(deploy_config: DeployConfig,
service: str,
authorize_target: bool = True,
*,
token_file: Optional[str] = None
) -> Dict[str, str]:
ns = deploy_config.service_ns(service)
return namespace_auth_headers(deploy_config, ns, authorize_target, token_file=token_file)
def deploy_config_and_headers_from_namespace(namespace: Optional[str] = None, *, authorize_target: bool = True) -> Tuple[DeployConfig, Dict[str, str], str]:
deploy_config = get_deploy_config()
if namespace is not None:
deploy_config = deploy_config.with_default_namespace(namespace)
else:
namespace = deploy_config.default_namespace()
headers = namespace_auth_headers(deploy_config, namespace, authorize_target=authorize_target)
return (deploy_config, headers, namespace)
async def async_get_userinfo(*,
deploy_config: Optional[DeployConfig] = None,
session_id: Optional[str] = None,
client_session: Optional[httpx.ClientSession] = None):
if deploy_config is None:
deploy_config = get_deploy_config()
if session_id is None:
headers = service_auth_headers(deploy_config, 'auth')
else:
headers = {'Authorization': f'Bearer {session_id}'}
userinfo_url = deploy_config.url('auth', '/api/v1alpha/userinfo')
async def request(session):
try:
resp = await request_retry_transient_errors(
session, 'GET', userinfo_url, headers=headers)
return await resp.json()
except aiohttp.client_exceptions.ClientResponseError as err:
if err.status == 401:
return None
raise
if client_session is None:
async with httpx.client_session() as session:
return await request(session)
return await request(client_session)
def get_userinfo(deploy_config=None, session_id=None, client_session=None):
return async_to_blocking(async_get_userinfo(
deploy_config=deploy_config,
session_id=session_id,
client_session=client_session))
def copy_paste_login(copy_paste_token: str, namespace: Optional[str] = None):
return async_to_blocking(async_copy_paste_login(copy_paste_token, namespace))
async def async_copy_paste_login(copy_paste_token: str, namespace: Optional[str] = None):
deploy_config, headers, namespace = deploy_config_and_headers_from_namespace(namespace, authorize_target=False)
async with aiohttp.ClientSession(
raise_for_status=True,
timeout=aiohttp.ClientTimeout(total=5),
headers=headers) as session:
async with await request_retry_transient_errors(
session, 'POST', deploy_config.url('auth', '/api/v1alpha/copy-paste-login'),
params={'copy_paste_token': copy_paste_token}) as resp:
data = await resp.json()
token = data['token']
username = data['username']
tokens = get_tokens()
tokens[namespace] = token
dot_hail_dir = os.path.expanduser('~/.hail')
if not os.path.exists(dot_hail_dir):
os.mkdir(dot_hail_dir, mode=0o700)
tokens.write()
return namespace, username
def get_user(username: str, namespace: Optional[str] = None) -> dict:
return async_to_blocking(async_get_user(username, namespace))
async def async_get_user(username: str, namespace: Optional[str] = None) -> dict:
deploy_config, headers, _ = deploy_config_and_headers_from_namespace(namespace)
async with aiohttp.ClientSession(
raise_for_status=True,
timeout=aiohttp.ClientTimeout(total=30),
headers=headers) as session:
async with await request_retry_transient_errors(
session, 'GET', deploy_config.url('auth', f'/api/v1alpha/users/{username}')) as resp:
return await resp.json()
def create_user(username: str, login_id: str, is_developer: bool, is_service_account: bool, namespace: Optional[str] = None):
return async_to_blocking(async_create_user(username, login_id, is_developer, is_service_account, namespace=namespace))
async def async_create_user(username: str, login_id: str, is_developer: bool, is_service_account: bool, namespace: Optional[str] = None):
deploy_config, headers, _ = deploy_config_and_headers_from_namespace(namespace)
body = {
'login_id': login_id,
'is_developer': is_developer,
'is_service_account': is_service_account,
}
async with aiohttp.ClientSession(
raise_for_status=True,
timeout=aiohttp.ClientTimeout(total=30),
headers=headers) as session:
await request_retry_transient_errors(
session, 'POST', deploy_config.url('auth', f'/api/v1alpha/users/{username}/create'), json=body
)
def delete_user(username: str, namespace: Optional[str] = None):
return async_to_blocking(async_delete_user(username, namespace=namespace))
async def async_delete_user(username: str, namespace: Optional[str] = None):
deploy_config, headers, _ = deploy_config_and_headers_from_namespace(namespace)
async with aiohttp.ClientSession(
raise_for_status=True,
timeout=aiohttp.ClientTimeout(total=300),
headers=headers) as session:
await request_retry_transient_errors(
session, 'DELETE', deploy_config.url('auth', f'/api/v1alpha/users/{username}')
)
```
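The header logic in `namespace_auth_headers` attaches a bearer token for the target namespace and, when running externally against a non-default namespace, an extra `X-Hail-Internal-Authorization` token for the default namespace. A self-contained sketch of that composition, using a plain dict in place of the real token store:
```python
from typing import Dict

def build_auth_headers(tokens: Dict[str, str], location: str, ns: str,
                       authorize_target: bool = True) -> Dict[str, str]:
    headers = {}
    if authorize_target:
        headers['Authorization'] = f'Bearer {tokens[ns]}'
    if location == 'external' and ns != 'default':
        # external clients also present the default-namespace token to the gateway
        headers['X-Hail-Internal-Authorization'] = f'Bearer {tokens["default"]}'
    return headers

hdrs = build_auth_headers({'default': 'tok0', 'test-ns': 'tok1'},
                          location='external', ns='test-ns')
assert set(hdrs) == {'Authorization', 'X-Hail-Internal-Authorization'}
```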
#### File: python/hailtop/httpx.py
```python
from typing import Any, Tuple, Optional, Type, TypeVar, Generic, Callable, Union
from types import TracebackType
import orjson
import aiohttp
from .utils import async_to_blocking
from .tls import internal_client_ssl_context, external_client_ssl_context
from .config.deploy_config import get_deploy_config
class ClientResponseError(aiohttp.ClientResponseError):
def __init__(self,
request_info: aiohttp.RequestInfo,
history: Tuple[aiohttp.ClientResponse, ...],
body: str = "",
**kwargs):
super().__init__(request_info, history, **kwargs)
self.body = body
def __str__(self) -> str:
return (f"{self.status}, message={self.message!r}, "
f"url={self.request_info.real_url!r} body={self.body!r}")
def __repr__(self) -> str:
args = f"{self.request_info!r}, {self.history!r}"
if self.status != 0:
args += f", status={self.status!r}"
if self.message != "":
args += f", message={self.message!r}"
if self.headers is not None:
args += f", headers={self.headers!r}"
if self.body is not None:
args += f", body={self.body!r}"
return f"{type(self).__name__}({args})"
class ClientResponse:
def __init__(self, client_response: aiohttp.ClientResponse):
self.client_response = client_response
async def release(self) -> None:
return await self.client_response.release()
@property
def closed(self) -> bool:
return self.client_response.closed
def close(self) -> None:
return self.client_response.close()
    async def wait_for_close(self) -> None:
        return await self.client_response.wait_for_close()
async def read(self) -> bytes:
return await self.client_response.read()
def get_encoding(self) -> str:
return self.client_response.get_encoding()
async def text(self, encoding: Optional[str] = None, errors: str = 'strict'):
return await self.client_response.text(encoding=encoding, errors=errors)
async def json(self):
encoding = self.get_encoding()
if encoding != 'utf-8':
return await self.client_response.json()
content_type = self.client_response.headers.get(aiohttp.hdrs.CONTENT_TYPE, None)
assert content_type is None or content_type == 'application/json', self.client_response
return orjson.loads(await self.read())
async def __aenter__(self) -> "ClientResponse":
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
await self.release()
class ClientSession:
def __init__(self,
*args,
raise_for_status: bool = True,
                 timeout: Optional[Union[aiohttp.ClientTimeout, float]] = None,
**kwargs):
location = get_deploy_config().location()
if location == 'external':
tls = external_client_ssl_context()
elif location == 'k8s':
tls = internal_client_ssl_context()
else:
assert location in ('gce', 'azure')
# no encryption on the internal gateway
tls = external_client_ssl_context()
assert 'connector' not in kwargs
if timeout is None:
timeout = aiohttp.ClientTimeout(total=5)
self.raise_for_status = raise_for_status
self.client_session = aiohttp.ClientSession(
*args,
timeout=timeout,
raise_for_status=False,
connector=aiohttp.TCPConnector(ssl=tls),
**kwargs
)
def request(
self, method: str, url: aiohttp.client.StrOrURL, **kwargs: Any
) -> aiohttp.client._RequestContextManager:
raise_for_status = kwargs.pop('raise_for_status', self.raise_for_status)
async def request_and_raise_for_status():
json_data = kwargs.pop('json', None)
if json_data is not None:
if kwargs.get('data') is not None:
raise ValueError(
'data and json parameters cannot be used at the same time')
kwargs['data'] = aiohttp.BytesPayload(
value=orjson.dumps(json_data),
# https://github.com/ijl/orjson#serialize
#
# "The output is a bytes object containing UTF-8"
encoding="utf-8",
content_type="application/json",
)
resp = await self.client_session._request(method, url, **kwargs)
if raise_for_status:
if resp.status >= 400:
# reason should always be not None for a started response
assert resp.reason is not None
body = (await resp.read()).decode()
await resp.release()
raise ClientResponseError(
resp.request_info,
resp.history,
status=resp.status,
message=resp.reason,
headers=resp.headers,
body=body
)
return resp
return aiohttp.client._RequestContextManager(request_and_raise_for_status())
def ws_connect(
self, *args, **kwargs
) -> aiohttp.client._WSRequestContextManager:
return self.client_session.ws_connect(*args, **kwargs)
def get(
self, url: aiohttp.client.StrOrURL, *, allow_redirects: bool = True, **kwargs: Any
) -> aiohttp.client._RequestContextManager:
return self.request('GET', url, allow_redirects=allow_redirects, **kwargs)
def options(
self, url: aiohttp.client.StrOrURL, *, allow_redirects: bool = True, **kwargs: Any
) -> aiohttp.client._RequestContextManager:
return self.request('OPTIONS', url, allow_redirects=allow_redirects, **kwargs)
def head(
self, url: aiohttp.client.StrOrURL, *, allow_redirects: bool = False, **kwargs: Any
) -> aiohttp.client._RequestContextManager:
return self.request('HEAD', url, allow_redirects=allow_redirects, **kwargs)
def post(
self, url: aiohttp.client.StrOrURL, *, data: Any = None, **kwargs: Any
) -> aiohttp.client._RequestContextManager:
return self.request('POST', url, data=data, **kwargs)
def put(
self, url: aiohttp.client.StrOrURL, *, data: Any = None, **kwargs: Any
) -> aiohttp.client._RequestContextManager:
return self.request('PUT', url, data=data, **kwargs)
def patch(
self, url: aiohttp.client.StrOrURL, *, data: Any = None, **kwargs: Any
) -> aiohttp.client._RequestContextManager:
return self.request('PATCH', url, data=data, **kwargs)
def delete(
self, url: aiohttp.client.StrOrURL, **kwargs: Any
) -> aiohttp.client._RequestContextManager:
return self.request('DELETE', url, **kwargs)
async def close(self) -> None:
await self.client_session.close()
@property
def closed(self) -> bool:
return self.client_session.closed
@property
def cookie_jar(self) -> aiohttp.abc.AbstractCookieJar:
return self.client_session.cookie_jar
@property
def version(self) -> Tuple[int, int]:
return self.client_session.version
async def __aenter__(self) -> "ClientSession":
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
await self.client_session.__aexit__(exc_type, exc_val, exc_tb)
def client_session(*args, **kwargs) -> ClientSession:
return ClientSession(*args, **kwargs)
def blocking_client_session(*args, **kwargs) -> 'BlockingClientSession':
return BlockingClientSession(client_session(*args, **kwargs))
class BlockingClientResponse:
def __init__(self, client_response: aiohttp.ClientResponse):
self.client_response = client_response
def read(self) -> bytes:
return async_to_blocking(self.client_response.read())
def text(self, encoding: Optional[str] = None, errors: str = 'strict') -> str:
return async_to_blocking(self.client_response.text(
encoding=encoding, errors=errors))
def json(self, *,
encoding: str = None,
loads: aiohttp.typedefs.JSONDecoder = aiohttp.typedefs.DEFAULT_JSON_DECODER,
content_type: Optional[str] = 'application/json') -> Any:
return async_to_blocking(self.client_response.json(
encoding=encoding, loads=loads, content_type=content_type))
def __del__(self):
self.client_response.__del__()
def history(self) -> Tuple[aiohttp.ClientResponse, ...]:
return self.client_response.history
def __repr__(self) -> str:
        return f'BlockingClientResponse({repr(self.client_response)})'
@property
def status(self) -> int:
return self.client_response.status
def raise_for_status(self) -> None:
self.client_response.raise_for_status()
class BlockingClientWebSocketResponse:
def __init__(self, ws: aiohttp.ClientWebSocketResponse):
self.ws = ws
@property
def closed(self) -> bool:
return self.ws.closed
@property
def close_code(self) -> Optional[int]:
return self.ws.close_code
@property
def protocol(self) -> Optional[str]:
return self.ws.protocol
@property
def compress(self) -> int:
return self.ws.compress
@property
def client_notakeover(self) -> bool:
return self.ws.client_notakeover
def get_extra_info(self, name: str, default: Any = None) -> Any:
return self.ws.get_extra_info(name, default)
def exception(self) -> Optional[BaseException]:
return self.ws.exception()
def ping(self, message: bytes = b'') -> None:
async_to_blocking(self.ws.ping(message))
def pong(self, message: bytes = b'') -> None:
async_to_blocking(self.ws.pong(message))
def send_str(self, data: str,
compress: Optional[int] = None) -> None:
return async_to_blocking(self.ws.send_str(data, compress))
def send_bytes(self, data: bytes,
compress: Optional[int] = None) -> None:
return async_to_blocking(self.ws.send_bytes(data, compress))
def send_json(self, data: Any,
compress: Optional[int] = None,
*, dumps: aiohttp.typedefs.JSONEncoder = aiohttp.typedefs.DEFAULT_JSON_ENCODER) -> None:
return async_to_blocking(self.ws.send_json(data, compress, dumps=dumps))
def close(self, *, code: int = 1000, message: bytes = b'') -> bool:
return async_to_blocking(self.ws.close(code=code, message=message))
def receive(self, timeout: Optional[float] = None) -> aiohttp.WSMessage:
return async_to_blocking(self.ws.receive(timeout))
def receive_str(self, *, timeout: Optional[float] = None) -> str:
return async_to_blocking(self.ws.receive_str(timeout=timeout))
def receive_bytes(self, *, timeout: Optional[float] = None) -> bytes:
return async_to_blocking(self.ws.receive_bytes(timeout=timeout))
def receive_json(self,
*, loads: aiohttp.typedefs.JSONDecoder = aiohttp.typedefs.DEFAULT_JSON_DECODER,
timeout: Optional[float] = None) -> Any:
return async_to_blocking(self.ws.receive_json(loads=loads, timeout=timeout))
def __iter__(self) -> 'BlockingClientWebSocketResponse':
return self
def __next__(self) -> aiohttp.WSMessage:
try:
return async_to_blocking(self.ws.__anext__())
except StopAsyncIteration as exc:
raise StopIteration() from exc
T = TypeVar('T') # pylint: disable=invalid-name
U = TypeVar('U') # pylint: disable=invalid-name
class AsyncToBlockingContextManager(Generic[T, U]):
def __init__(self, context_manager, wrap: Callable[[T], U]):
self.context_manager = context_manager
self.wrap = wrap
def __enter__(self) -> U:
return self.wrap(async_to_blocking(self.context_manager.__aenter__()))
def __exit__(self,
exc_type: Optional[Type[BaseException]],
exc: Optional[BaseException],
tb: Optional[TracebackType]) -> None:
async_to_blocking(self.context_manager.__aexit__(exc_type, exc, tb))
class BlockingClientResponseContextManager(AsyncToBlockingContextManager):
def __init__(self, context_manager):
super().__init__(context_manager, BlockingClientResponse)
class BlockingClientWebSocketResponseContextManager(AsyncToBlockingContextManager):
def __init__(self, context_manager):
super().__init__(context_manager, BlockingClientWebSocketResponse)
class BlockingClientSession:
def __init__(self, session: ClientSession):
self.session = session
def request(self,
method: str,
url: aiohttp.typedefs.StrOrURL,
**kwargs: Any) -> BlockingClientResponseContextManager:
return BlockingClientResponseContextManager(
self.session.request(method, url, **kwargs))
def ws_connect(self,
url: aiohttp.typedefs.StrOrURL,
**kwargs: Any) -> BlockingClientWebSocketResponseContextManager:
return BlockingClientWebSocketResponseContextManager(
self.session.ws_connect(url, **kwargs))
def get(self,
url: aiohttp.typedefs.StrOrURL,
*,
allow_redirects: bool = True,
**kwargs: Any) -> BlockingClientResponseContextManager:
return BlockingClientResponseContextManager(
self.session.get(url, allow_redirects=allow_redirects, **kwargs))
def options(self,
url: aiohttp.typedefs.StrOrURL,
*,
allow_redirects: bool = True,
**kwargs: Any) -> BlockingClientResponseContextManager:
return BlockingClientResponseContextManager(
self.session.options(url, allow_redirects=allow_redirects, **kwargs))
def head(self,
url: aiohttp.typedefs.StrOrURL,
*,
allow_redirects: bool = False,
**kwargs: Any) -> BlockingClientResponseContextManager:
return BlockingClientResponseContextManager(self.session.head(
url, allow_redirects=allow_redirects, **kwargs))
def post(self,
url: aiohttp.typedefs.StrOrURL,
*,
data: Any = None, **kwargs: Any) -> BlockingClientResponseContextManager:
return BlockingClientResponseContextManager(self.session.post(
url, data=data, **kwargs))
def put(self,
url: aiohttp.typedefs.StrOrURL,
*,
data: Any = None,
**kwargs: Any) -> BlockingClientResponseContextManager:
return BlockingClientResponseContextManager(self.session.put(
url, data=data, **kwargs))
def patch(self,
url: aiohttp.typedefs.StrOrURL,
*,
data: Any = None,
**kwargs: Any) -> BlockingClientResponseContextManager:
return BlockingClientResponseContextManager(self.session.patch(
url, data=data, **kwargs))
def delete(self,
url: aiohttp.typedefs.StrOrURL,
**kwargs: Any) -> BlockingClientResponseContextManager:
return BlockingClientResponseContextManager(self.session.delete(
url, **kwargs))
def close(self) -> None:
async_to_blocking(self.session.close())
@property
def closed(self) -> bool:
return self.session.closed
@property
def cookie_jar(self) -> aiohttp.abc.AbstractCookieJar:
return self.session.cookie_jar
@property
def version(self) -> Tuple[int, int]:
return self.session.version
def __enter__(self) -> 'BlockingClientSession':
self.session = async_to_blocking(self.session.__aenter__())
return self
def __exit__(self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> None:
self.close()
```
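`BlockingClientSession` and the response wrappers rely on a small adapter that drives async context managers to completion on an event loop so they can be used from synchronous code. A minimal, standard-library-only sketch of the same pattern (the resource class and module-level loop are illustrative, not part of hailtop):
```python
import asyncio

class AsyncResource:
    async def __aenter__(self):
        await asyncio.sleep(0)
        return 'payload'

    async def __aexit__(self, exc_type, exc, tb):
        await asyncio.sleep(0)

_loop = asyncio.new_event_loop()

def run_blocking(coro):
    # drive the coroutine to completion on a dedicated loop
    return _loop.run_until_complete(coro)

class BlockingWrapper:
    def __init__(self, acm):
        self.acm = acm

    def __enter__(self):
        return run_blocking(self.acm.__aenter__())

    def __exit__(self, exc_type, exc, tb):
        run_blocking(self.acm.__aexit__(exc_type, exc, tb))

with BlockingWrapper(AsyncResource()) as value:
    assert value == 'payload'
```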
#### File: hailtop/utils/tqdm.py
```python
from typing import Union, Optional
from enum import Enum
class TqdmDisableOption(Enum):
default = 0
def tqdm(*args, disable: Optional[Union[TqdmDisableOption, bool]] = TqdmDisableOption.default, **kwargs):
from tqdm.notebook import tqdm as tqdm_notebook # pylint: disable=import-outside-toplevel
from tqdm.auto import tqdm as tqdm_auto # pylint: disable=import-outside-toplevel
# To tqdm_notebook, None means do not display. To standard tqdm, None means
# display only when connected to a TTY.
if disable == TqdmDisableOption.default:
disable = False if tqdm_auto == tqdm_notebook else None
return tqdm_auto(*args, disable=disable, **kwargs)
```
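A usage sketch, assuming the `tqdm` package is installed and `hailtop` is importable: by default the helper lets `tqdm.auto` pick notebook vs. console rendering, while `disable=True` silences the bar explicitly.
```python
from hailtop.utils.tqdm import tqdm

for _ in tqdm(range(3), desc='work'):   # auto-selected frontend
    pass

for _ in tqdm(range(3), disable=True):  # explicitly silenced
    pass
```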
#### File: hail/utils/jsonx.py
```python
import json
import hail as hl
class JSONEncoder(json.JSONEncoder):
"""JSONEncoder that supports some Hail types."""
def default(self, o):
if isinstance(o, (hl.utils.frozendict, hl.utils.Struct)):
return dict(o)
if isinstance(o, hl.utils.Interval):
return {
"start": o.start,
"end": o.end,
"includes_start": o.includes_start,
"includes_end": o.includes_end,
}
if isinstance(o, hl.genetics.Locus):
return {
"contig": o.contig,
"position": o.position,
"reference_genome": o.reference_genome,
}
if isinstance(o, hl.genetics.ReferenceGenome):
return o.name
return json.JSONEncoder.default(self, o)
```
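A usage sketch, assuming `hail` is installed (a `Struct` can be constructed without initializing a Hail session):
```python
import json

import hail as hl
from hail.utils.jsonx import JSONEncoder

doc = {"s": hl.utils.Struct(a=1, b="x")}
print(json.dumps(doc, cls=JSONEncoder))  # {"s": {"a": 1, "b": "x"}}
```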
#### File: hailtop/utils/test_utils.py
```python
from hailtop.utils import (partition, url_basename, url_join, url_scheme,
url_and_params, parse_docker_image_reference, grouped)
from hailtop.utils.utils import digits_needed, unzip, filter_none, flatten
def test_partition_zero_empty():
assert list(partition(0, [])) == []
def test_partition_even_small():
assert list(partition(3, range(3))) == [range(0, 1), range(1, 2), range(2, 3)]
def test_partition_even_big():
assert list(partition(3, range(9))) == [range(0, 3), range(3, 6), range(6, 9)]
def test_partition_uneven_big():
assert list(partition(2, range(9))) == [range(0, 5), range(5, 9)]
def test_partition_toofew():
assert list(partition(6, range(3))) == [range(0, 1), range(1, 2), range(2, 3),
range(3, 3), range(3, 3), range(3, 3)]
def test_url_basename():
assert url_basename('/path/to/file') == 'file'
assert url_basename('https://hail.is/path/to/file') == 'file'
def test_url_join():
assert url_join('/path/to', 'file') == '/path/to/file'
assert url_join('/path/to/', 'file') == '/path/to/file'
assert url_join('/path/to/', '/absolute/file') == '/absolute/file'
assert url_join('https://hail.is/path/to', 'file') == 'https://hail.is/path/to/file'
assert url_join('https://hail.is/path/to/', 'file') == 'https://hail.is/path/to/file'
assert url_join('https://hail.is/path/to/', '/absolute/file') == 'https://hail.is/absolute/file'
def test_url_scheme():
assert url_scheme('https://hail.is/path/to') == 'https'
assert url_scheme('/path/to') == ''
def test_url_and_params():
assert url_and_params('https://example.com/') == ('https://example.com/', {})
assert url_and_params('https://example.com/foo?') == ('https://example.com/foo', {})
assert url_and_params('https://example.com/foo?a=b&c=d') == ('https://example.com/foo', {'a': 'b', 'c': 'd'})
def test_parse_docker_image_reference():
x = parse_docker_image_reference('animage')
assert x.domain is None
assert x.path == 'animage'
assert x.tag is None
assert x.digest is None
assert x.name() == 'animage'
assert str(x) == 'animage'
x = parse_docker_image_reference('hailgenetics/animage')
assert x.domain == 'hailgenetics'
assert x.path == 'animage'
assert x.tag is None
assert x.digest is None
assert x.name() == 'hailgenetics/animage'
assert str(x) == 'hailgenetics/animage'
x = parse_docker_image_reference('localhost:5000/animage')
assert x.domain == 'localhost:5000'
assert x.path == 'animage'
assert x.tag is None
assert x.digest is None
assert x.name() == 'localhost:5000/animage'
assert str(x) == 'localhost:5000/animage'
x = parse_docker_image_reference('localhost:5000/a/b/name')
assert x.domain == 'localhost:5000'
assert x.path == 'a/b/name'
assert x.tag is None
assert x.digest is None
assert x.name() == 'localhost:5000/a/b/name'
assert str(x) == 'localhost:5000/a/b/name'
x = parse_docker_image_reference('localhost:5000/a/b/name:tag')
assert x.domain == 'localhost:5000'
assert x.path == 'a/b/name'
assert x.tag == 'tag'
assert x.digest is None
assert x.name() == 'localhost:5000/a/b/name'
assert str(x) == 'localhost:5000/a/b/name:tag'
x = parse_docker_image_reference('localhost:5000/a/b/name:tag@sha256:abc123')
assert x.domain == 'localhost:5000'
assert x.path == 'a/b/name'
assert x.tag == 'tag'
assert x.digest == 'sha256:abc123'
assert x.name() == 'localhost:5000/a/b/name'
assert str(x) == 'localhost:5000/a/b/name:tag@sha256:abc123'
x = parse_docker_image_reference('localhost:5000/a/b/name@sha256:abc123')
assert x.domain == 'localhost:5000'
assert x.path == 'a/b/name'
assert x.tag is None
assert x.digest == 'sha256:abc123'
assert x.name() == 'localhost:5000/a/b/name'
assert str(x) == 'localhost:5000/a/b/name@sha256:abc123'
x = parse_docker_image_reference('name@sha256:abc123')
assert x.domain is None
assert x.path == 'name'
assert x.tag is None
assert x.digest == 'sha256:abc123'
assert x.name() == 'name'
assert str(x) == 'name@sha256:abc123'
x = parse_docker_image_reference('gcr.io/hail-vdc/batch-worker:123fds312')
assert x.domain == 'gcr.io'
assert x.path == 'hail-vdc/batch-worker'
assert x.tag == '123fds312'
assert x.digest is None
assert x.name() == 'gcr.io/hail-vdc/batch-worker'
assert str(x) == 'gcr.io/hail-vdc/batch-worker:123fds312'
x = parse_docker_image_reference('us-docker.pkg.dev/my-project/my-repo/test-image')
assert x.domain == 'us-docker.pkg.dev'
assert x.path == 'my-project/my-repo/test-image'
assert x.tag is None
assert x.digest is None
assert x.name() == 'us-docker.pkg.dev/my-project/my-repo/test-image'
assert str(x) == 'us-docker.pkg.dev/my-project/my-repo/test-image'
def test_grouped_size_0_groups_9_elements():
try:
list(grouped(0, [1,2,3,4,5,6,7,8,9]))
except ValueError:
pass
else:
assert False
def test_grouped_size_1_groups_9_elements():
actual = list(grouped(1, [1,2,3,4,5,6,7,8,9]))
expected = [[1], [2], [3], [4], [5], [6], [7], [8], [9]]
assert actual == expected
def test_grouped_size_5_groups_9_elements():
actual = list(grouped(5, [1,2,3,4,5,6,7,8,9]))
expected = [[1, 2, 3, 4, 5], [6, 7, 8, 9]]
assert actual == expected
def test_grouped_size_3_groups_0_elements():
actual = list(grouped(3,[]))
expected = []
assert actual == expected
def test_grouped_size_2_groups_1_elements():
actual = list(grouped(2,[1]))
expected = [[1]]
assert actual == expected
def test_grouped_size_1_groups_0_elements():
actual = list(grouped(1,[0]))
expected = [[0]]
assert actual == expected
def test_grouped_size_1_groups_5_elements():
actual = list(grouped(1,['abc', 'def', 'ghi', 'jkl', 'mno']))
expected = [['abc'], ['def'], ['ghi'], ['jkl'], ['mno']]
assert actual == expected
def test_grouped_size_2_groups_5_elements():
actual = list(grouped(2,['abc', 'def', 'ghi', 'jkl', 'mno']))
expected = [['abc', 'def'], ['ghi', 'jkl'], ['mno']]
assert actual == expected
def test_grouped_size_3_groups_6_elements():
actual = list(grouped(3,['abc', 'def', 'ghi', 'jkl', 'mno', '']))
expected = [['abc', 'def', 'ghi'], ['jkl', 'mno', '']]
assert actual == expected
def test_grouped_size_3_groups_7_elements():
actual = list(grouped(3,['abc', 'def', 'ghi', 'jkl', 'mno', 'pqr', 'stu']))
expected = [['abc', 'def', 'ghi'], ['jkl', 'mno', 'pqr'], ['stu']]
assert actual == expected
def test_unzip():
assert unzip([]) == ([], [])
assert unzip([(0, 'a')]) == ([0], ['a'])
assert unzip([(123, '')]) == ([123], [''])
assert unzip([(123, 'abc')]) == ([123], ['abc'])
assert unzip([(123, 456), ('abc', 'def')]) == ([123, 'abc'], [456, 'def'])
assert unzip([(123, 'abc'), (456, 'def'), (789, 'ghi')]) == ([123, 456, 789], ['abc', 'def', 'ghi'])
def test_digits_needed():
assert digits_needed(0) == 1
assert digits_needed(1) == 1
assert digits_needed(12) == 2
assert digits_needed(333) == 3
assert digits_needed(100) == 3
assert digits_needed(3000) == 4
assert digits_needed(50000) == 5
def test_filter_none():
assert filter_none([]) == []
assert filter_none([None, []]) == [[]]
assert filter_none([0, []]) == [0, []]
assert filter_none([1, 2, [None]]) == [1, 2, [None]]
assert filter_none([1, 3.5, 2, 4,]) == [1, 3.5, 2, 4]
assert filter_none([1, 2, 3.0, None, 5]) == [1, 2, 3.0, 5]
assert filter_none(['a', 'b', 'c', None]) == ['a', 'b', 'c']
assert filter_none([None, [None, [None, [None]]]]) == [[None, [None, [None]]]]
def test_flatten():
assert flatten([]) == []
assert flatten([[]]) == []
assert flatten([[], []]) == []
assert flatten([[], [3]]) == [3]
assert flatten([[1, 2, 3], [3], [4, 5]]) == [1, 2, 3, 3, 4, 5]
assert flatten([['a', 'b', 'c'], ['d', 'e']]) == ['a', 'b', 'c', 'd', 'e']
assert flatten([[['a'], ['b']], [[1, 2, 3], [4, 5]]]) == [['a'], ['b'], [1, 2, 3], [4, 5]]
assert flatten([['apples'], ['bannanas'], ['oranges']]) == ['apples', 'bannanas', 'oranges']
assert flatten([['apple', 'bannana'], ['a', 'b', 'c'], [1, 2, 3, 4]]) == ['apple', 'bannana', 'a', 'b', 'c', 1, 2, 3, 4]
assert flatten([['apples'], [''], ['bannanas'], [''], ['oranges'], ['']]) == ['apples', '', 'bannanas', '', 'oranges', '']
``` |
{
"source": "jkguiang/CMSMonitoring",
"score": 2
} |
#### File: python/CMSMonitoring/StompAMQ.py
```python
from __future__ import print_function
from __future__ import division
import os
import json
import time
import socket
import random
import logging
try:
import stomp
except ImportError:
print("No stomp module found")
from uuid import uuid4
from CMSMonitoring.Validator import validate_schema, Schemas
# global object which holds CMS Monitoring schemas
_schemas = Schemas(update=3600, jsonschemas=False)
_local_schemas = None
def validate(doc, schema, logger):
"""
Helper function to validate given document against a schema
Schemas are searched for locally, or within the central CMSMonitoring
schemas. The provided schema name is compared to full file names and
to their base file names (without extension).
Return a list of offending keys and a list of unknown keys, or None, None
if no validation has been performed.
"""
global _local_schemas
if _local_schemas is None:
# First time running, try to find the schema locally
# Second time, _local_schemas will be a dictionary and this is skipped
_local_schemas = {}
if os.path.isfile(schema):
try:
_local_schemas[schema] = json.load(open(schema))
msg = 'Successfully loaded local schema {} for validation'.format(schema)
logger.warn(msg)
except ValueError:
msg = 'Local schema {} is not json compliant'.format(schema)
logger.error(msg)
if schema in _local_schemas:
return validate_schema(_local_schemas[schema], doc, logger)
else:
for sch in _schemas.schemas():
if schema in [sch, os.path.basename(sch).rsplit('.')[0]]:
return validate_schema(_schemas.schemas()[sch], doc, logger)
msg = "Schema not found: '{}'".format(schema)
logger.error(msg)
return None, None
class StompyListener(object):
"""
    Auxiliary listener class to fetch all possible states in the Stomp
connection.
"""
def __init__(self, logger=None):
        logging.basicConfig(level=logging.DEBUG)
self.logger = logger if logger else logging.getLogger('StompyListener')
def safe_headers(self, headers):
"Return stripped headers"
hdrs = dict(headers)
for key in ['username', 'password', 'login', 'passcode']:
if key in hdrs:
hdrs[key] = 'xxx'
return hdrs
def on_connecting(self, host_and_port):
"print debug message on_connecting"
self.logger.debug('on_connecting %s', str(host_and_port))
def on_error(self, headers, message):
"print debug message on_error"
self.logger.debug('received an error HEADERS: %s, MESSAGE: %s', \
str(self.safe_headers(headers)), str(message))
def on_message(self, headers, body):
"print debug message on_message"
self.logger.debug('on_message HEADERS: %s BODY: %s', \
str(self.safe_headers(headers)), str(body))
def on_heartbeat(self):
"print debug message on_heartbeat"
self.logger.debug('on_heartbeat')
def on_send(self, frame):
"print debug message on_send"
self.logger.debug('on_send HEADERS: %s, BODY: %s ...', \
str(self.safe_headers(frame.headers)), str(frame.body)[:160])
def on_connected(self, headers, body):
"print debug message on_connected"
self.logger.debug('on_connected HEADERS: %s, BODY: %s', \
str(self.safe_headers(headers)), str(body))
def on_disconnected(self):
"print debug message on_disconnected"
self.logger.debug('on_disconnected')
def on_heartbeat_timeout(self):
"print debug message on_heartbeat_timeout"
self.logger.debug('on_heartbeat_timeout')
def on_before_message(self, headers, body):
"print debug message on_before_message"
self.logger.debug('on_before_message HEADERS: %s, BODY: %s', \
str(self.safe_headers(headers)), str(body))
return (headers, body)
def broker_ips(host, port):
"Return broker IP addresses from provide host name"
addr = []
for item in socket.getaddrinfo(host, int(port)):
# each item is (family, socktype, proto, canonname, sockaddr) tuple
        # so we take the last element, sockaddr (item[4])
# for IPv4 sockaddr is (address, port)
# for IPv6 sockaddr is (address, port, flow info, scope id)
# we are interested only in address
addr.append(item[4][0])
return addr
class StompAMQ(object):
"""
Class to generate and send notifications to a given Stomp broker
and a given topic.
:param username: The username to connect to the broker.
:param password: The password to connect to the broker.
:param producer: The 'producer' field in the notification header
:param topic: The topic to be used on the broker
:param validation_schema: schema to use for validation (filename of a valid json file).
If 'None', skip any validation. Look for schema files locally, then in 'schemas/'
folder in CMSMonitoring package or in folder defined in 'CMSMONITORING_SCHEMAS'
environmental variable.
:param host_and_ports: The hosts and ports list of the brokers.
E.g.: [('cms-test-mb.cern.ch', 61313)]
:param cert: path to certificate file
:param key: path to key file
:param validation_loglevel: logging level to use for validation feedback
:param timeout_interval: provides timeout interval to failed broker
:param ipv4_only: use ipv4 servers only
"""
# Version number to be added in header
_version = '0.3'
def __init__(self, username, password, producer, topic, validation_schema,
host_and_ports=None, logger=None, cert=None, key=None,
validation_loglevel=logging.WARNING,
timeout_interval=600, ipv4_only=True):
self._username = username
self._password = password
self._producer = producer
self._topic = topic
logging.basicConfig(level=validation_loglevel)
self.logger = logger if logger else logging.getLogger('StompAMQ')
self._host_and_ports = host_and_ports or [('cms-test-mb.cern.ch', 61313)]
self.ip_and_ports = []
try:
self.logger.info("host and ports: %s", repr(host_and_ports))
if isinstance(host_and_ports, list):
for host, port in host_and_ports:
for ipaddr in broker_ips(host, port):
if (ipaddr, port) not in self.ip_and_ports:
if ipv4_only:
if ipaddr.find(':') == -1:
self.ip_and_ports.append((ipaddr, port))
else:
self.ip_and_ports.append((ipaddr, port))
self.logger.info("resolver: %s", self.ip_and_ports)
except Exception as exp:
self.logger.warn("unable to resolve host_and_ports: %s", str(exp))
self._cert = cert
self._key = key
self._use_ssl = True if key and cert else False
# silence the INFO log records from the stomp library, until this issue gets fixed:
# https://github.com/jasonrbriggs/stomp.py/issues/226
logging.getLogger("stomp.py").setLevel(logging.WARNING)
self.validation_schema = validation_schema
self.validation_loglevel = validation_loglevel
if self.validation_schema is None:
self.logger.warn('No document validation performed!')
self.connections = []
if self.ip_and_ports:
for idx in range(len(self.ip_and_ports)):
host_and_ports = [self.ip_and_ports[idx]]
try:
conn = stomp.Connection(host_and_ports=host_and_ports)
desc = 'host: %s' % host_and_ports
if self._use_ssl:
# This requires stomp >= 4.1.15
conn.set_ssl(for_hosts=host_and_ports, \
key_file=self._key, cert_file=self._cert)
desc = 'host: %s, ckey: %s, cert: %s' \
% (host_and_ports, self._key, self._cert)
self.connections.append((conn, desc))
except Exception as exp:
                    msg = 'Failed to connect to the message broker\n'
msg += 'Host: %s\n' % str(host_and_ports)
msg += 'Error: %s' % str(exp)
self.logger.warn(msg)
else:
try:
conn = stomp.Connection(host_and_ports=self._host_and_ports)
desc = 'host: %s' % self._host_and_ports
if self._use_ssl:
# This requires stomp >= 4.1.15
conn.set_ssl(for_hosts=self._host_and_ports, \
key_file=self._key, cert_file=self._cert)
desc = 'host: %s, ckey: %s, cert: %s' \
% (self._host_and_ports, self._key, self._cert)
self.connections.append((conn, desc))
except Exception as exp:
                msg = 'Failed to connect to the message broker\n'
msg += 'Host: %s\n' % str(self._host_and_ports)
msg += 'Error: %s' % str(exp)
self.logger.warn(msg)
self.timeouts = {}
self.timeout_interval = timeout_interval
def connect(self):
"Connect to the brokers"
available_connections = []
for conn, desc in self.connections:
# check if we already connected, if so make it available and proceed
if conn.is_connected():
available_connections.append(conn)
continue
conn.set_listener('StompyListener', StompyListener(self.logger))
# check if our connection failed before
# if so we'll wait until timeout_interval is passed
if conn in self.timeouts and \
abs(self.timeouts[conn] - time.time()) < self.timeout_interval:
continue
try:
conn.start()
# If cert/key are used, ignore username and password
if self._use_ssl:
conn.connect(wait=True)
else:
conn.connect(username=self._username, passcode=self._password, wait=True)
available_connections.append(conn)
# we succeed to connect to broker, remove any record in timeout dict
if conn in self.timeouts:
del self.timeouts[conn]
self.logger.debug("Connection to %s is successful", repr(self._host_and_ports))
except Exception as exc:
tstamp = time.strftime("%b %d %Y %H:%M:%S GMT", time.gmtime())
msg = "%s, connection to %s failed, error: %s" \
% (tstamp, desc, str(exc))
self.logger.error(msg)
# record that our connection has failed
self.timeouts[conn] = time.time()
if not available_connections:
return None
# return random connection
idx = random.randint(0, len(available_connections)-1)
self.logger.debug("available connections %s, con_id %s", len(available_connections), idx)
return available_connections[idx]
def disconnect(self):
"Disconnect from brokers"
for conn, _ in self.connections:
if conn.is_connected():
conn.disconnect()
def send(self, data):
"""
Connect to the stomp host and send a single notification
(or a list of notifications).
:param data: Either a single notification (as returned by
`make_notification`) or a list of such.
:return: a list of notification bodies that failed to send
"""
# If only a single notification, put it in a list
if isinstance(data, dict) and 'body' in data:
data = [data]
failedNotifications = []
for notification in data:
conn = self.connect() # provide random connection to brokers
if conn:
result = self._send_single(conn, notification)
if result:
failedNotifications.append(result)
self.disconnect() # disconnect all available connections to brokers
if failedNotifications:
self.logger.warning('Failed to send to %s %i docs out of %i', repr(self._host_and_ports),
len(failedNotifications), len(data))
return failedNotifications
def _send_single(self, conn, notification):
"""
Send a single notification to `conn`
:param conn: An already connected stomp.Connection
:param notification: A dictionary as returned by `make_notification`
:return: The notification body in case of failure, or else None
"""
try:
body = notification.pop('body')
conn.send(destination=self._topic,
headers=notification,
body=json.dumps(body),
ack='auto')
self.logger.debug('Notification %s sent', str(notification))
except Exception as exc:
            self.logger.error('Notification: %s (type=%s) not sent, error: %s', \
str(notification), type(notification), str(exc))
            return body
        return None
def make_notification(self, payload, docType, docId=None, \
producer=None, ts=None, metadata=None, \
dataSubfield="data", schema=None, \
dropOffendingKeys=False, dropUnknownKeys=False):
"""
Produce a notification from a single payload, adding the necessary
headers and metadata. Generic metadata is generated to include a
timestamp, producer name, document id, and a unique id. User can
pass additional metadata which updates the generic metadata.
If payload already contains a metadata field, it is overwritten.
:param payload: Actual data.
:param docType: document type for metadata.
:param docId: document id representing the notification. If none provided,
a unique id is created.
:param producer: The notification producer name, taken from the StompAMQ
instance producer name by default.
:param ts: timestamp to be added to metadata. Set as time.time() by default
:param metadata: dictionary of user metadata to be added. (Updates generic
metadata.)
:param dataSubfield: field name to use for the actual data. If none, the data
is put directly in the body. Default is "data"
:param schema: Use this schema template to validate the payload. This should be
the name of a json file looked for locally, or inside the folder defined
in the 'CMSMONITORING_SCHEMAS' environment variable, or one of the defaults
provided with the CMSMonitoring package. If 'None', the schema from the
StompAMQ instance is applied. If that is also 'None', no validation is
performed.
:param dropOffendingKeys: Drop keys that failed validation from the notification
:param dropUnknownKeys: Drop keys not present in schema from the notification
:return: a single notifications with the proper headers and metadata and lists of
offending and unknown keys
"""
producer = producer or self._producer
umetadata = metadata or {}
ts = ts or int(time.time())
uuid = str(uuid4())
docId = docId or uuid
# Validate the payload
schema = schema or self.validation_schema
offending_keys, unknown_keys = [], []
if schema:
offending_keys, unknown_keys = validate(payload, schema, self.logger)
if offending_keys:
msg = "Document {} conflicts with schema '{}'".format(docId, schema)
self.logger.warn(msg)
if dropOffendingKeys:
for key in offending_keys:
payload.pop(key)
if unknown_keys:
msg = "Document {} contains keys not present in schema '{}'".format(docId, schema)
self.logger.warn(msg)
if dropUnknownKeys:
for key in unknown_keys:
payload.pop(key)
headers = {'type': docType,
'version': self._version,
'producer': producer}
metadata = {'timestamp': ts,
'producer': producer,
'_id': docId,
'uuid': uuid}
metadata.update(umetadata)
body = {}
if dataSubfield:
body[dataSubfield] = payload
else:
body.update(payload)
body['metadata'] = metadata
notification = {}
notification.update(headers)
notification['body'] = body
return notification, offending_keys, unknown_keys
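# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module. The credentials,
# producer name, topic and payload below are hypothetical placeholders and a
# reachable broker is assumed; only the StompAMQ / make_notification / send
# calls reflect the API defined above.
if __name__ == '__main__':
    amq = StompAMQ(username='someuser', password='somepass',
                   producer='cms-example-producer', topic='/topic/cms.example',
                   validation_schema=None,
                   host_and_ports=[('cms-test-mb.cern.ch', 61313)])
    notif, offending, unknown = amq.make_notification(
        payload={'metric': 'cpu_usage', 'value': 42.0}, docType='example_doc')
    failed = amq.send(notif)
    print('failed notifications: %s' % len(failed))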
``` |
{
"source": "jkguiang/rtt-lab",
"score": 2
} |
#### File: hgg/selections/gen_selections.py
```python
import awkward as ak
def set_genZ(events, selection_options, debug):
electron_idxs = abs(events.GenPart_pdgId) == 11
muon_idxs = abs(events.GenPart_pdgId) == 13
tau_idxs = abs(events.GenPart_pdgId) == 15
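    # GenPart_genPartIdxMother gives, for each generator particle, the index of
    # its mother particle; indexing GenPart_pdgId with those mother indices
    # below tells us whether each lepton was produced in a Z decay (pdgId 23)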
motherOfElectrons = events.GenPart_genPartIdxMother[electron_idxs]
motherOfMuons = events.GenPart_genPartIdxMother[muon_idxs]
motherOfTaus = events.GenPart_genPartIdxMother[tau_idxs]
ZToEleEvents = ak.sum((events.GenPart_pdgId[motherOfElectrons] == 23), axis=1) >= 2
ZToMuEvents = ak.sum((events.GenPart_pdgId[motherOfMuons] == 23), axis=1) >= 2
ZToTauEvents = ak.sum((events.GenPart_pdgId[motherOfTaus] == 23), axis=1) >= 2
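    # encode the decay mode (assuming the decays are exclusive):
    # 1 = Z->ee, 2 = Z->mumu, 3 = Z->tautau, 0 = no Z->ll match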
events["genZ_decayMode"] = 1 * ZToEleEvents + 2 * ZToMuEvents + 3 * ZToTauEvents
return events
```
#### File: hgg/selections/photon_selections.py
```python
import awkward
import numpy
import numba
import hgg.selections.selection_utils as utils
"""
Notes: for some reason, event-level and object-level cuts seem to interfere with each other.
E.g. if I do
events = events[event_cut1]
events.Photon = events.Photon[object_cut1]
events = events[event_cut2]
events will regain some of the photons that were eliminated in object_cut1
For this reason, all selections should be done in the following way:
1. Perform event-level selections (may include object-level quantities, e.g. 2 photons with pt/mgg > 0.25)
2. Trim objects with object-level selections afterwards
"""
def create_selected_photons(photons, branches, debug):
    # collect the selectedPhoton_* branches into a record array, keyed by the
    # branch name with the "selectedPhoton_" prefix stripped
    branch_map = {}
    for branch in branches:
        if "selectedPhoton" not in branch:
            continue
        key = branch.replace("selectedPhoton_", "")
        branch_map[key] = photons[branch]
    selected_photons = awkward.zip(branch_map)
    return selected_photons
def select_photons(events, photons, options, debug):
#cut_diagnostics = utils.ObjectCutDiagnostics(objects = photons, cut_set = "[photon_selections.py : select_photons]", debug = debug)
pt_cut = photons.pt > options["photons"]["pt"]
eta_cut1 = abs(photons.eta) < options["photons"]["eta"]
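    # veto the barrel/endcap transition region: keep photons whose |eta| is
    # either below the lower edge or above the upper edge of the gap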
eta_cut2 = abs(photons.eta) < options["photons"]["transition_region_eta"][0]
eta_cut3 = abs(photons.eta) > options["photons"]["transition_region_eta"][1]
eta_cut = eta_cut1 & (eta_cut2 | eta_cut3)
idmva_cut = photons.mvaID > options["photons"]["idmva_cut"]
eveto_cut = photons.electronVeto >= options["photons"]["eveto_cut"]
photon_cut = pt_cut & eta_cut & idmva_cut & eveto_cut
#cut_diagnostics.add_cuts([pt_cut, eta_cut, idmva_cut, eveto_cut, photon_cut], ["pt > 25", "|eta| < 2.5", "idmva", "eveto", "all"])
return photon_cut
def set_photons(events, photons, debug):
events["lead_pho_ptmgg"] = photons.pt[:,0] / events.gg_mass
events["sublead_pho_ptmgg"] = photons.pt[:,1] / events.gg_mass
events["lead_pho_eta"] = photons.eta[:,0]
events["sublead_pho_eta"] = photons.eta[:,1]
events["lead_pho_idmva"] = photons.mvaID[:,0]
events["sublead_pho_idmva"] = photons.mvaID[:,1]
events["lead_pixelSeed"] = photons.pixelSeed[:,0]
events["sublead_pixelSeed"] = photons.pixelSeed[:,1]
return events
``` |
{
"source": "JKGu/perspective",
"score": 2
} |
#### File: tests/manager/test_session.py
```python
import json
import random
from perspective import Table, PerspectiveManager
data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
class TestPerspectiveSession(object):
def post(self, msg):
'''boilerplate callback to simulate a client's `post()` method.'''
msg = json.loads(msg)
print("self.post: ", msg)
assert msg["id"] is not None
def validate_post(self, msg, expected=None):
msg = json.loads(msg)
if expected:
assert msg == expected
# test session
def test_session_new_session(self, sentinel):
s = sentinel(False)
def handle_to_dict(msg):
s.set(True)
message = json.loads(msg)
assert message["data"] == data
message = {"id": 1, "table_name": "table1", "view_name": "view1", "cmd": "view"}
manager = PerspectiveManager()
session = manager.new_session()
client_id = session.client_id
table = Table(data)
manager.host_table("table1", table)
# create a view through the session to make sure it has a client id
session.process(message, self.post)
# make sure the client ID is attached to the new view
assert len(manager._views.keys()) == 1
assert manager.get_view("view1")._client_id == client_id
to_dict_message = {"id": 2, "name": "view1", "cmd": "view_method", "method": "to_dict"}
session.process(to_dict_message, handle_to_dict)
assert s.get() is True
def test_session_multiple_new_sessions(self, sentinel):
s = sentinel(0)
def handle_to_dict(msg):
s.set(s.get() + 1)
message = json.loads(msg)
assert message["data"] == {
"a": [1, 2, 3, 1, 2, 3],
"b": ["a", "b", "c", "str1", "str2", "str3"]
}
manager = PerspectiveManager()
sessions = [manager.new_session() for i in range(5)]
table = Table(data)
manager.host_table("table1", table)
# create a view on each session
for i, session in enumerate(sessions):
# IDs have to conflict - each viewer will send the first message as
# ID = 1, so we need to make sure we handle that.
msg = {"id": 1, "table_name": "table1", "view_name": "view" + str(i), "cmd": "view"}
session.process(msg, self.post)
manager_views = list(manager._views.keys())
for key in ["view" + str(i) for i in range(5)]:
assert key in manager_views
for i, session in enumerate(sessions):
view = manager.get_view("view" + str(i))
assert view._client_id == session.client_id
# arbitrarily do an update
random_session_id = random.randint(0, 4)
update_message = {"id": 2, "name": "table1", "cmd": "table_method", "method": "update", "args": [{"a": [1, 2, 3], "b": ["str1", "str2", "str3"]}]}
sessions[random_session_id].process(update_message, self.post)
# should reflect in all sessions
for i, session in enumerate(sessions):
to_dict_message = {"id": 3, "name": "view" + str(i), "cmd": "view_method", "method": "to_dict"}
session.process(to_dict_message, handle_to_dict)
assert s.get() == 5
def test_session_close_session_with_callbacks(self, sentinel):
s = sentinel(0)
manager = PerspectiveManager()
session = manager.new_session()
client_id = session.client_id
# create a table and view using manager
make_table = {"id": 1, "name": "table1", "cmd": "table", "args": [data]}
session.process(make_table, self.post)
make_view = {"id": 2, "table_name": "table1", "view_name": "view1", "cmd": "view"}
session.process(make_view, self.post)
# make sure the client ID is attached to the new view
assert len(manager._views.keys()) == 1
assert manager.get_view("view1")._client_id == client_id
def callback(updated):
assert updated["port_id"] == 0
s.set(s.get() + 100)
# simulate a client that holds callbacks by id
callbacks = {
3: callback
}
def post_update(msg):
# when `on_update` is triggered, this callback gets the message
# and has to decide which callback to trigger.
message = json.loads(msg)
assert message["id"] is not None
if message["id"] == 3:
# trigger callback
assert message["data"] == {
"port_id": 0
}
callbacks[message["id"]](message["data"])
# hook into the created view and pass it the callback
make_on_update = {"id": 3, "name": "view1", "cmd": "view_method", "subscribe": True, "method": "on_update", "callback_id": "callback_1"}
session.process(make_on_update, post_update)
# call updates
update1 = {"id": 4, "name": "table1", "cmd": "table_method", "method": "update", "args": [{"a": [4], "b": ["d"]}]}
update2 = {"id": 5, "name": "table1", "cmd": "table_method", "method": "update", "args": [{"a": [5], "b": ["e"]}]}
session.process(update1, self.post)
session.process(update2, self.post)
assert s.get() == 200
# close the session
session.close()
# make sure the view is gone - but not the table
assert "table1" in manager._tables
assert manager._views == {}
assert len(manager._callback_cache) == 0
def test_session_close_multiple_sessions_with_callbacks(self, sentinel):
s = sentinel(0)
manager = PerspectiveManager()
sessions = [manager.new_session() for i in range(5)]
# create a table and view using manager
make_table = {"id": 1, "name": "table1", "cmd": "table", "args": [data]}
manager._process(make_table, self.post)
# create a view on each session
for i, session in enumerate(sessions):
# IDs have to conflict - each viewer will send the first message as
# ID = 1, so we need to make sure we handle that.
msg = {"id": 2, "table_name": "table1", "view_name": "view" + str(i), "cmd": "view"}
session.process(msg, self.post)
manager_views = list(manager._views.keys())
for key in ["view" + str(i) for i in range(5)]:
assert key in manager_views
for i, session in enumerate(sessions):
view = manager.get_view("view" + str(i))
assert view._client_id == session.client_id
def callback(updated):
assert updated["port_id"] == 0
s.set(s.get() + 100)
# simulate a client that holds callbacks by id
callbacks = {
3: callback
}
def post_update(msg):
# when `on_update` is triggered, this callback gets the message
# and has to decide which callback to trigger.
message = json.loads(msg)
assert message["id"] is not None
if message["id"] == 3:
# trigger callback
assert message["data"] == {
"port_id": 0
}
callbacks[message["id"]](message["data"])
# create a view and an on_update on each session
for i, session in enumerate(sessions):
view_name = "view" + str(i)
# IDs have to conflict - each viewer will send the first message as
# ID = 1, so we need to make sure we handle that.
msg = {"id": 2, "table_name": "table1", "view_name": view_name, "cmd": "view"}
session.process(msg, self.post)
make_on_update = {"id": 3, "name": view_name, "cmd": "view_method", "subscribe": True, "method": "on_update", "callback_id": "callback_1"}
session.process(make_on_update, post_update)
# call updates using a random session - they should propagate
random_session_id = random.randint(0, 4)
random_session = sessions[random_session_id]
random_client_id = random_session.client_id
update1 = {"id": 4, "name": "table1", "cmd": "table_method", "method": "update", "args": [{"a": [4], "b": ["d"]}]}
update2 = {"id": 5, "name": "table1", "cmd": "table_method", "method": "update", "args": [{"a": [5], "b": ["e"]}]}
random_session.process(update1, self.post)
random_session.process(update2, self.post)
# all updates processed, all callbacks fired
assert s.get() == 1000
# close a random session, and make sure the other views and callbacks
# are not affected
random_session.close()
# make sure the view is gone - but not the table
assert "table1" in manager._tables
assert "view" + str(random_session_id) not in manager._views.keys()
assert len(manager._views.keys()) == 4
for callback in manager._callback_cache:
assert callback["client_id"] != random_client_id
assert len(manager._callback_cache) == 4
```
#### File: tests/table/test_table_object.py
```python
import six
import sys
from random import randint
from perspective.table import Table
from datetime import date, datetime
class CustomObjectBlank(object):
pass
class CustomObjectStore(object):
def __init__(self, value):
self._value = value
def _psp_dtype_(self):
return "object"
def __int__(self):
return int(self._value)
def __repr__(self):
return 'test'
class CustomObjectRepr(object):
def __init__(self, value):
self._value = value
def __repr__(self):
return str(self._value)
class CustomObjectIntPromoteToString(CustomObjectRepr):
def _psp_dtype_(self):
return int
class CustomObjectFloatPromoteToString(CustomObjectRepr):
def _psp_dtype_(self):
return float
class CustomObjectIntBoth(CustomObjectRepr):
def _psp_dtype_(self):
return int
def _psp_repr_(self):
return int(self._value) + 1
class CustomObjectFloatBoth(CustomObjectRepr):
def _psp_dtype_(self):
return float
def _psp_repr_(self):
return float(self._value) + 1.0
class CustomObjectIntConvert(CustomObjectRepr):
def _psp_dtype_(self):
return int
def __int__(self):
return int(self._value)
def __repr__(self):
return 'test'
class CustomObjectFloatConvert(CustomObjectRepr):
def _psp_dtype_(self):
return float
def __float__(self):
return float(self._value)
def __repr__(self):
return 'test'
class CustomObjectIntConvertFromFloat(CustomObjectRepr):
def _psp_dtype_(self):
return int
def __float__(self):
return float(self._value)
def __repr__(self):
return 'test'
class CustomObjectFloatConvertFromInt(CustomObjectRepr):
def _psp_dtype_(self):
return float
def __int__(self):
return int(self._value)
def __repr__(self):
return 'test'
class TestTableObjectsExtract(object):
def test_table_custom_object(self):
data = {"a": [CustomObjectBlank()]}
tbl = Table(data)
assert tbl.schema() == {"a": str}
assert tbl.size() == 1
assert '<perspective.tests.table.test_table_object.CustomObjectBlank object at 0x' in tbl.view().to_dict()["a"][0]
def test_table_custom_object_repr(self):
data = {"a": [CustomObjectRepr(1), CustomObjectRepr(2)]}
tbl = Table(data)
assert tbl.schema() == {"a": str}
assert tbl.size() == 2
assert tbl.view().to_dict() == {"a": ["1", "2"]}
def test_table_custom_object_repr_update(self):
data = {"a": [CustomObjectIntBoth(1), CustomObjectIntBoth(2)]}
tbl = Table(data)
assert tbl.schema() == {"a": int}
assert tbl.size() == 2
assert tbl.view().to_dict() == {"a": [2, 3]}
tbl.update([{"a": CustomObjectIntBoth(3)}, {"a": CustomObjectIntBoth(4)}])
assert tbl.size() == 4
assert tbl.view().to_dict() == {"a": [2, 3, 4, 5]}
def test_custom_object_int_promote_to_string(self):
data = {"a": [CustomObjectIntPromoteToString(1), CustomObjectIntPromoteToString(2)]}
tbl = Table(data)
assert tbl.schema() == {"a": str}
assert tbl.size() == 2
assert tbl.view().to_dict() == {"a": ["1", "2"]}
def test_custom_object_float_promote_to_string(self):
data = {"a": [CustomObjectFloatPromoteToString(1), CustomObjectFloatPromoteToString(2)]}
tbl = Table(data)
assert tbl.schema() == {"a": str}
assert tbl.size() == 2
assert tbl.view().to_dict() == {"a": ["1", "2"]}
def test_custom_object_int_both(self):
data = {"a": [CustomObjectIntBoth(1), CustomObjectIntBoth(2)]}
tbl = Table(data)
assert tbl.schema() == {"a": int}
assert tbl.size() == 2
# We do value + 1 just to make sure
assert tbl.view().to_dict() == {"a": [2, 3]}
def test_custom_object_float_both(self):
data = {"a": [CustomObjectFloatBoth(1), CustomObjectFloatBoth(2)]}
tbl = Table(data)
assert tbl.schema() == {"a": float}
assert tbl.size() == 2
# We do value + 1 just to make sure
assert tbl.view().to_dict() == {"a": [2.0, 3.0]}
def test_custom_object_int_convert(self):
data = {"a": [CustomObjectIntConvert(1), CustomObjectIntConvert(2)]}
tbl = Table(data)
assert tbl.schema() == {"a": int}
assert tbl.size() == 2
assert tbl.view().to_dict() == {"a": [1, 2]}
def test_custom_object_float_convert(self):
data = {"a": [CustomObjectFloatConvert(1), CustomObjectFloatConvert(2)]}
tbl = Table(data)
assert tbl.schema() == {"a": float}
assert tbl.size() == 2
assert tbl.view().to_dict() == {"a": [1.0, 2.0]}
def test_custom_object_int_convert_from_float(self):
data = {"a": [CustomObjectIntConvertFromFloat(1), CustomObjectIntConvertFromFloat(2)]}
tbl = Table(data)
assert tbl.schema() == {"a": int}
assert tbl.size() == 2
assert tbl.view().to_dict() == {"a": [1, 2]}
def test_custom_object_float_convert_from_int(self):
data = {"a": [CustomObjectFloatConvertFromInt(1), CustomObjectFloatConvertFromInt(2)]}
tbl = Table(data)
assert tbl.schema() == {"a": float}
assert tbl.size() == 2
assert tbl.view().to_dict() == {"a": [1.0, 2.0]}
class TestTableObjectsStore(object):
def test_object_passthrough(self):
t = CustomObjectStore(1)
t2 = CustomObjectStore(2)
t3 = CustomObjectStore(3)
data = {"a": [t, t2, t3]}
tbl = Table(data)
assert tbl.schema() == {"a": object}
assert tbl.size() == 3
assert tbl.view().to_dict() == {"a": [t, t2, t3]}
def test_object_referencecount(self):
t = CustomObjectStore(1)
data = {"a": [t]}
tbl = Table(data)
assert tbl.schema() == {"a": object}
assert tbl.size() == 1
assert tbl.view().to_dict() == {"a": [t]}
# Count references
# 1 for `t`, one for `data`, one for argument to sys.getrefcount, and one for the table
assert sys.getrefcount(t) == 4
def test_object_referencecount_update(self):
t = CustomObjectStore(1)
data = {"a": [t]}
tbl = Table(data)
assert tbl.schema() == {"a": object}
assert tbl.size() == 1
assert tbl.view().to_dict() == {"a": [t]}
# Count references
# 1 for `t`, one for `data`, one for argument to sys.getrefcount, and one for the table
assert sys.getrefcount(t) == 4
count = randint(5, 10)
for c in range(count):
tbl.update([data])
# c+1 new copies, +1 for original
assert tbl.size() == (c+1) + 1
# c+1 copies in the table now, +1 for original and +3 for others (`t`, `data`, arg to getrefcount)
assert sys.getrefcount(t) == (c+1) + 4
def test_object_referencecount_clear(self):
t = CustomObjectStore(1)
data = {"a": [t]}
tbl = Table(data)
assert tbl.schema() == {"a": object}
assert tbl.size() == 1
# Count references
# 1 for `t`, one for `data`, one for argument to sys.getrefcount, and one for the table
assert sys.getrefcount(t) == 4
tbl.clear()
assert tbl.size() == 0
# 1 for `t`, one for `data`, one for argument to sys.getrefcount
assert sys.getrefcount(t) == 3
def test_object_referencecount_update_clear(self):
t = CustomObjectStore(1)
data = {"a": [t]}
tbl = Table(data)
assert tbl.schema() == {"a": object}
assert tbl.size() == 1
assert tbl.view().to_dict() == {"a": [t]}
# Count references
# 1 for `t`, one for `data`, one for argument to sys.getrefcount, and one for the table
assert sys.getrefcount(t) == 4
# do random number of updates
count = randint(5, 10)
for _ in range(count):
tbl.update([data])
tbl.clear()
assert tbl.size() == 0
assert tbl.view().to_dict() == {}
# 1 for `t`, one for `data`, one for argument to sys.getrefcount
assert sys.getrefcount(t) == 3
def test_object_referencecount_update_index(self):
t = CustomObjectStore(1)
data = {"a": [0], "b": [t]}
tbl = Table(data, index="a")
assert tbl.schema() == {"a": int, "b": object}
assert tbl.size() == 1
assert tbl.view().to_dict() == {"a": [0], "b": [t]}
# Count references
# 1 for `t`, one for `data`, one for argument to sys.getrefcount, and one for the table
assert sys.getrefcount(t) == 4
# do random number of updates
count = randint(5, 10)
for _ in range(count):
tbl.update([data])
# unchanged
assert tbl.size() == 1
assert sys.getrefcount(t) == 4
tbl.clear()
assert tbl.size() == 0
assert tbl.view().to_dict() == {}
# 1 for `t`, one for `data`, one for argument to sys.getrefcount
assert sys.getrefcount(t) == 3
def test_object_referencecount_update_complicatedsequence(self):
from .object_sequence import run
run()
def test_object_referencecount_delete(self):
t = CustomObjectStore(1)
t2 = CustomObjectStore(2)
t_ref_count = 2
t2_ref_count = 2
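        # start at 2: one reference held by the local variable and one by the
        # temporary argument passed to sys.getrefcount()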
tbl = Table({"a": [1], "b": [t]}, index="a")
t_ref_count += 1
assert tbl.schema() == {"a": int, "b": object}
assert tbl.size() == 1
# Count references
# 1 for `t`, 1 for `data`, 1 for argument to sys.getrefcount, and 1 for the table
print(sys.getrefcount(t), "should be", t_ref_count)
print(sys.getrefcount(t2), "should be", t2_ref_count)
tbl.update({"a": [2], "b": [t]})
t_ref_count += 1
tbl.update({"a": [3], "b": [t]})
t_ref_count += 1
assert sys.getrefcount(t) == t_ref_count
tbl.remove([1])
tbl.clear()
assert tbl.size() == 0
assert tbl.view().to_dict() == {}
print(sys.getrefcount(t), "should be", 2)
assert sys.getrefcount(t) == 2
print(sys.getrefcount(t2), "should be", 2)
assert sys.getrefcount(t2) == 2
    def test_object_referencecount_delete_sequence(self):
from .object_sequence import run2
run2()
``` |
{
"source": "jkh911208/gluster_manager_api",
"score": 3
} |
#### File: gluster_manager_api/controllers/Resource.py
```python
import logging
import config
from db.Database import Database
from controllers.Credential import Credential
from util.SSHClient import SSHClient
import util.convert_to_dict
import util.inventory
class Resource(object):
def __init__(self):
self.db = Database(config.mongodb_uri, config.database_name)
self.db.set_collection("resource")
def __del__(self):
try:
self.db.client.close()
except Exception:
pass
def discover_new_node(self, node: dict) -> bool:
# check if i have all the data i need
required_key = ["address", "cred_id"]
for key in required_key:
if key not in node:
raise ValueError("discover new node require address and credential id")
# get credential detail from DB
cred_tool = Credential()
cred = cred_tool.get_one_credential_with_password(node["cred_id"])
# initiate ssh connection
ssh = SSHClient(node["address"], cred["username"], cred["password"])
# check if user have sudo privilege
if not ssh.check_sudo_privilege():
raise RuntimeError("User don't have sudo previlege")
# check Linux distro
distro = ssh.command("cat /etc/os-release")
distro = util.convert_to_dict.config_to_dict(distro)
try:
if distro["ID"] != "centos":
raise RuntimeError("Only support CentOS 7")
if int(distro["VERSION_ID"]) != 7:
raise RuntimeError("Only support CentOS 7")
except KeyError as err:
logging.exception("cannot verify the linux distro : {}".format(distro))
raise RuntimeError("Only support CentOS 7")
# get disk list in dict
disk_list = util.inventory.get_disk_list(ssh)
# check if address exist in db
exist_data = self.db.find({"address": node["address"]})
if len(exist_data) > 0:
raise ValueError("node is already discovered")
        # write node information to DB
return self.db.insert({
"address" : node["address"],
"cred_id" : node["cred_id"],
"distro" : distro["ID"],
"version" : int(distro["VERSION_ID"]),
"name" : distro["PRETTY_NAME"] if "PRETTY_NAME" in distro else distro["ID"] + " " + distro["VERSION_ID"],
"disks": disk_list
})
def get_all_nodes(self):
nodes = self.db.find_all()
for node in nodes:
# convert ObjectId to string
node["_id"] = str(node["_id"])
nodes = {"nodes" : nodes}
return nodes
def delete_one_node(self, node_id):
# check if i have all the data i need
required_key = ["id"]
for key in required_key:
if key not in node_id:
raise ValueError("delete node requires id")
#TODO
# Need to add check if node is in use for cluster. if yes do not delete
self.db.delete(node_id["id"])
```
#### File: gluster_manager_api/routes/resource.py
```python
import logging
from controllers.Resource import Resource
from flask import Blueprint, request, jsonify
import paramiko
resource = Blueprint("resource", __name__)
resource_controller = Resource()
@resource.route("/", methods=["POST"])
def discover_new_node():
if not request.is_json:
return jsonify({"error": "only json request is accepted"}), 400
node = request.get_json()
try:
resource_controller.discover_new_node(node)
return jsonify({}), 201
except paramiko.AuthenticationException as err:
logging.exception("Authentication to node : {} failed, please check you credential".format(node["address"]))
return jsonify({"error": err.__str__()}), 401
except Exception as err:
logging.exception("Not able to discover new node with error : {}".format(err.__str__()))
return jsonify({"error": err.__str__()}), 500
@resource.route("/", methods=["GET"])
def get_all_nodes():
try:
nodes = resource_controller.get_all_nodes()
return jsonify(nodes), 200
except Exception as err:
logging.exception("not able to get all nodes from data with error : {}".format(err.__str__()))
return jsonify({"error": err.__str__()}), 500
@resource.route("/", methods=["DELETE"])
def delete_one_node():
if not request.is_json:
return jsonify({"error": "only json request is accepted"}), 400
node_id = request.get_json()
try:
deleted = resource_controller.delete_one_node(node_id)
return jsonify({"deleted": True}), 200
except Exception as err:
logging.exception("not able to get all nodes from data with error : {}".format(err.__str__()))
return jsonify({"error": err.__str__()}), 500
``` |
{
"source": "jkhaak/py3-tic-tac-toe",
"score": 4
} |
#### File: py3-tic-tac-toe/tic_tac_toe/utils.py
```python
import itertools as it
def transpose(xs):
"""
Transpose a matrix
"""
return map(list, zip(*xs))
def concat(xss):
"""
Concatenate list of lists.
"""
return list(it.chain.from_iterable(xss))
def chop(n, xs):
"""
Create a list of lists sized with n from list elements in xs.
"""
if len(xs) == 0:
return []
return [xs[:n]] + chop(n, xs[n:])
def drop_at(nth, xs):
"""
Drop nth element in a list. Returns the init and tail of the xs without the nth element.
"""
if nth <= 0:
return [], xs
elif len(xs) < nth:
return xs, []
return xs[: nth - 1], xs[nth:]
def zip_with(fn, xs, ys):
"""
    Zip two lists like the standard zip, but combine each pair with the user-supplied function fn instead of building tuples.
"""
return [fn(a, b) for (a, b) in zip(xs, ys)]
def interleave(item, xs):
"""
Insert an item in between every item of xs.
"""
if len(xs) <= 1:
return xs
return [xs[0]] + [item] + interleave(item, xs[1:])
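# Illustrative usage sketch (not part of the original module) exercising the
# helpers above on a tic-tac-toe sized 3x3 grid.
if __name__ == "__main__":
    grid = chop(3, list(range(9)))                       # [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    print(list(transpose(grid)))                         # columns: [[0, 3, 6], [1, 4, 7], [2, 5, 8]]
    print(concat(grid))                                  # [0, 1, 2, 3, 4, 5, 6, 7, 8]
    print(interleave("|", ["X", "O", "X"]))              # ['X', '|', 'O', '|', 'X']
    print(drop_at(2, [10, 20, 30, 40]))                  # ([10], [30, 40])
    print(zip_with(lambda a, b: a + b, [1, 2], [3, 4]))  # [4, 6]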
``` |
{
"source": "jkhadley/capstone-project",
"score": 3
} |
#### File: python/classification/models.py
```python
import os
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Conv2D, MaxPooling2D, concatenate,Input,Dropout,Dense
from keras.models import Sequential,Model,load_model
from keras.optimizers import Adam,SGD
from skimage import io
def alexNet(params):
# unpack some of the parameters
activation = params['activation']
    if os.path.isfile(params['init_w']):
model = load_model(params['init_w'])
else:
init_w = params['init_w']
# make the model
depths = [3,96,256,384,384,256]
inputs = Input((256,256,3))
conv1 = Conv2D(depths[0], kernel_size = (12,12),strides = (4,4),activation = activation,kernel_initializer = init_w)(inputs)
conv2 = Conv2D(depths[1], kernel_size = (5,5) ,strides = (1,1),activation = activation,kernel_initializer = init_w)(conv1)
pool1 = MaxPooling2D(pool_size = (5,5),strides = (2,2))(conv2)
conv3 = Conv2D(depths[2],kernel_size = (3,3) ,strides = (1,1),activation = activation,padding = "same",kernel_initializer = init_w)(pool1)
pool2 = MaxPooling2D(pool_size = (3,3),strides = (2,2))(conv3)
# add final convolutional layers
conv4 = Conv2D(depths[3],kernel_size = (3,3) ,strides = (1,1),activation = activation,padding = "same",kernel_initializer = init_w)(pool2)
conv5 = Conv2D(depths[4],kernel_size = (3,3) ,strides = (1,1),activation = activation,padding = "same",kernel_initializer = init_w)(conv4)
conv6 = Conv2D(depths[5],kernel_size = (3,3) ,strides = (1,1),activation = activation,padding = "same",kernel_initializer = init_w)(conv5)
# add dense layers
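        # Note: no Flatten layer is applied before these Dense layers, so they
        # act on the 2-D convolutional feature maps and the output keeps a
        # spatial shape; add a Flatten/GlobalPooling step if a single class
        # vector per image is required.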
dense1 = Dense(params['fc_size'],kernel_initializer = init_w)(conv6)
dense2 = Dense(params['fc_size'],kernel_initializer = init_w)(dense1)
outputs = Dense(params["num_of_classes"],kernel_initializer = init_w,activation= params['output_activation'])(dense2)
# define the inputs and outputs
        model = Model(inputs=inputs, outputs=outputs)
# define optimizers
optimizer = SGD(lr=params['lr'],momentum = params['momentum'])
# define the model optimizer and loss function
model.compile(optimizer = optimizer, loss = params['loss'], metrics = ['accuracy'])
return model
if __name__ == "__main__":
params = {
'init_w' : 'he_normal',
'lr' : 0.01,
'activation' : "relu",
'loss' : 'categorical_crossentropy',
'num_of_classes' : 4,
'output_activation' : "softmax",
'dropout' : 0.5,
'momentum': 0,
'fc_size': 32
}
model = alexNet(params)
model.summary()
```
#### File: python/classification/splitData.py
```python
from shutil import copy
import random
import os
def splitData(path,c,split):
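    """Copy the images of class `c` under `path` into train/validate/test
    subdirectories, using the first two fractions in `split` for train and
    validate; the remaining images go to test."""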
src = path + "/" + c
trainDest = path + "/train/" + c
validateDest = path + "/validate/" + c
testDest = path + "/test/" + c
cwd = os.getcwd()
os.chdir(src)
images = os.listdir()
random.shuffle(images)
split1 = round(split[0]*len(images))
split2 = round((split[0] + split[1])*len(images))
train = images[:split1]
validate = images[split1:split2]
test = images[split2:]
for i in images:
if i in train:
copy(src + "/" + i,trainDest + "/" + i)
elif i in validate:
copy(src + "/" + i,validateDest + "/" + i)
else:
copy(src + "/" + i,testDest + "/" + i)
os.chdir(cwd)
if __name__ == "__main__":
path = "/home/ubuntu/project/data/maize/"
#path = "../../data/maize"
os.chdir(path)
path = os.getcwd()
classes = os.listdir()
os.mkdir("train")
os.mkdir("validate")
os.mkdir("test")
for c in classes:
os.mkdir("./train/" + c)
os.mkdir("./validate/" + c)
os.mkdir("./test/" + c)
splitData(path, c,[0.8,0.1,0.1])
```
#### File: src/python/misc.py
```python
import os
def dirSize(path,dirs):
"""Finds the size of the directories specified.
Parameters
----------
path: String
Location of the directories in question
dirs: list OR String
Which directories to include in the length
Returns
-------
length : int
Number of images contained in the directories in question.
"""
oldDir = os.getcwd()
os.chdir(path)
cwd = os.getcwd()
length = 0
if isinstance(dirs,str):
dirs = [dirs]
for d in dirs:
path = cwd + "/" + d + "/data/"
subdirs = os.listdir(path)
for s in subdirs:
length += len(os.listdir(path + s))
os.chdir(oldDir)
return length
def combineFiles(first,second):
"""Combine the results from multiple files.
Useful for combining results from multiple inputs and saves as the
    first file's name with a 2 appended to it.
Parameters
----------
first : String
Path to the first file to be combined
second : String
Path to the second file to be combined
"""
fname = first.replace(".csv","2.csv")
fnew = open(fname,"w")
batch = 0
with open(first,"r") as f:
for line in f:
batch = line.split(",")[-1]
fnew.write(line)
with open(second,"r") as f:
i = 0
for line in f:
if i != 0:
line = line.split(",")
                    # continue the batch numbering where the first file left off
                    # (assumes the last column is an integer batch index)
                    line[-1] = str(int(line[-1]) + int(batch)) + "\n"
fnew.write(",".join(line))
i +=1
fnew.close()
def countFiles(path):
"""Counts the number of files in the training, validation, and test directories.
Parameters
----------
path : String
path to the train,validata, and test images
"""
os.chdir(path)
dirs = ["train","validate","test"]
for d in dirs:
dirCnt = 0
plant = os.listdir(path + "/" + d)
for i in plant:
tmpDir = path + "/" + d + "/" + i + "/data/"
subDirs = os.listdir(tmpDir)
plntCnt = 0
for j in subDirs:
plntCnt += len(os.listdir(tmpDir + j))
print(d + " " + i + ": " + str(plntCnt))
dirCnt += plntCnt
print(d + ": " + str(dirCnt))
```
#### File: src/python/modelTrainer.py
```python
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D, concatenate, Input, Dropout, Lambda
from keras.losses import mean_squared_error, mean_absolute_error, mean_absolute_percentage_error
from keras.models import Sequential, Model, load_model
from keras.callbacks import ModelCheckpoint
from keras.optimizers import SGD
from generators import getBatchGenerators
from callbacks import BatchLogger
from metrics import f1Score, recall, precision, RMSE
from misc import dirSize
import keras.backend as K
import numpy as np
import os
class ModelTrainer():
"""Object to contain the model parameters and train the model."""
def __init__(self,dataPath,resultsPath,modelPath):
"""Initializes class variables.
Parameters
----------
dataPath : String
Path to the base directory of the data classes
resultsPath : String
Path to where the results csv files should be written
modelPath : String
Path to where the models are stored
Attributes
----------
        conv_depth : int, (default is 64)
Depth of initial Convolutional layer
batch_size : int, (default is 15)
Number of images to load and train with before updating weights
"""
# initialize class variables
self.model = None
self.modelPath = modelPath
self.resultsPath = resultsPath
self.dataPath = dataPath
self.saveName = None
self.classMap = None
self.className = None
self.conv_depth = 64
self.batch_size = 15
self.input_shape = (256,256,3)
self.n_classes = 2
self.metrics = ['acc']
self.init_w = 'zeros'
self.old_weights = None
self.loss_function = 'categorical_crossentropy'
self.optimizer = None
self.regression = False
self.trainGen = None
self.validateGen = None
self.pixels = 256*256
self.dropout = 0
self.batch_log_interval = 10
self.epochs = 5
self.steps_per_epoch = 0
self.validation_steps = 0
self.train_size = 0
self.validate_size = 0
def train(self):
"""Trains the model specified by the parameters.
Creates a model and generators based on the specified
parameters and then trains it. It will save the outputs
according to callback information that is specified.
"""
# setup model
self.createModel()
self.setGenerators()
self.buildCallbacks()
self.printParameters()
# train model
_ = self.model.fit_generator(
generator = self.trainGen,
validation_data = self.validateGen,
steps_per_epoch = self.steps_per_epoch,
validation_steps = self.validation_steps,
epochs = self.epochs,
use_multiprocessing = True,
callbacks = self.callbacks)
        # clear save paths to avoid overwriting accidentally
self.saveName = None
def evaluate(self,**kwargs):
"""Evaluates the model on the training and validation data.
Parameters
----------
        validationOnly : boolean
Determines whether to evaluate only the validation data.
Evaluates the trained model that is loaded through the setOldModel method.
"""
# setup model
self.optimizer = SGD(lr = 0,momentum=0,decay = 0)
self.createModel()
self.setGenerators()
self.printParameters()
output = {}
        valOnly = bool(kwargs.get('validationOnly', False))
        if not valOnly:
trainOutput = self.model.evaluate_generator(
generator = self.trainGen,
steps=self.steps_per_epoch,
use_multiprocessing=True,
verbose=1
)
output['loss'] = trainOutput[0]
for i in range(len(self.metricsAsString)):
output[self.metricsAsString[i]] = trainOutput[i+1]
print("loss : " + str(output['loss']))
for i in range(len(self.metricsAsString)):
tmp = self.metricsAsString[i]
print(tmp + " : " + str(output[tmp]))
validationOutput = self.model.evaluate_generator(
generator = self.validateGen,
steps=self.validation_steps,
use_multiprocessing=True,
verbose=1)
output['val_loss'] = validationOutput[0]
for i in range(len(self.metricsAsString)):
output["val_" + self.metricsAsString[i]] = validationOutput[i+1]
print("val_loss : " + str(output['val_loss']))
for i in range(len(self.metricsAsString)):
tmp = "val_" + self.metricsAsString[i]
print(tmp + " : " + str(output[tmp]))
def continueTraining(self,model):
"""Further trains the specified model."""
self.setOldModel(model)
self.model.compile(optimizer = self.optimizer,
loss=self.loss_function,
metrics=self.metrics)
self.setGenerators()
self.buildCallbacks()
self.printParameters()
# fit model to data
_ = self.model.fit_generator(
generator = self.trainGen,
validation_data = self.validateGen,
steps_per_epoch = self.steps_per_epoch,
validation_steps = self.validation_steps,
epochs = self.epochs,
use_multiprocessing = True,
callbacks = self.callbacks)
def createModel(self):
"""Creates a U-net model based on the specified parameters.
If the model is not set to a regression model, the output has the same
depth and width as the input and as many layers as the number of
classes. If the model is set to regression, the output is an array that
contains the proportion of the image that the class is.
"""
outputs, inputs = baseUNet(self.input_shape,
self.conv_depth,
self.n_classes,
self.init_w,
self.dropout)
        if self.regression:
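            # collapse the per-pixel class scores into per-class ground-cover
            # fractions for the regression variant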
outputs = Lambda(getPropOfGround)(outputs)
model = Model(inputs = inputs,outputs = outputs)
model.compile(optimizer = self.optimizer,
loss=self.loss_function,
metrics=self.metrics)
        if self.old_weights is not None:
model.set_weights(self.old_weights)
self.model = model
def singlePrediction(self,img):
"""Make a prediction using the loaded model on a single image.
Parameters
----------
img : np.array
Image to make prediction on
"""
self.optimizer = SGD(lr = 0,momentum=0,decay = 0)
self.createModel()
output = self.model.predict(np.expand_dims(img,axis = 0))
return output
def buildCallbacks(self):
"""Builds the callbacks that save the model weights and results.
Saves the model checkpoint and logger to the paths specified by
modelPath and resultsPath, and then gives them the names
specified by saveName.
"""
model_checkpoint = ModelCheckpoint(self.modelPath + '/' + self.saveName + '.hdf5',
monitor='loss',verbose=1)
logger = BatchLogger(self.resultsPath + '/' + self.saveName + "_batch.csv",
self.resultsPath + '/' + self.saveName + "_epoch.csv",
self.batch_log_interval,
self.metricsAsString)
self.callbacks = [model_checkpoint,logger]
def setSaveName(self,name):
"""Sets the name to save the results and model weights with."""
self.saveName = name
def setOldModel(self,model):
"""Gets the model parameters from the specified model.
Gets the weights, input shape, and number of classes from the
old model to load into the new model to do more training or
switch model type.
Parameters
----------
model: String
Path to the old keras model object to be loaded
"""
self.modelName = model
oldModel = load_model(self.modelPath + "/" + model + ".hdf5",
custom_objects={'recall': recall,
'precision': precision,
'f1Score':f1Score})
self.old_weights = oldModel.get_weights()
self.input_shape = oldModel.inputs[0].get_shape().as_list()[1:]
self.n_classes = oldModel.outputs[0].get_shape().as_list()[-1]
self.conv_depth = oldModel.layers[1].output_shape[-1]
self.model = oldModel
def setRegression(self):
"""Set the model to a regression model.
Set the model to a regression model and changes the loss
function to MSE.
"""
self.regression = True
self.loss_function = mean_squared_error
def setSegmentation(self):
"""Set the model to a segmentation model.
Sets the model to segmentation and changes the loss function to
categorical cross-entropy.
"""
self.regression = False
self.loss_function = "categorical_crossentropy"
def setClassMap(self,classMap):
"""Set the class map that specifies which directory corresponds to which class.
Parameters
----------
classMap : dictionary
Mapping of directories to correct output
"""
self.classMap = classMap
self.n_classes = len(np.unique(list(classMap.values()))) + 1
# find the number of images in the data set
dirs = list(classMap.keys())
self.train_size = dirSize(self.dataPath + "train",dirs)
self.validate_size = dirSize(self.dataPath + "validate",dirs)
# set steps per epochs
self.steps_per_epoch = round(self.train_size/self.batch_size)
self.validation_steps = round(self.validate_size/self.batch_size)
def setClassName(self,whichDir):
"""Specify the single directory to use on the dataPath.
Parameters
----------
whichDir: String
Name of the directory to be used for training
"""
self.className = whichDir
self.setClassMap({whichDir : 1})
def setOptimizerParams(self,lr,momentum,decay):
"""Set the SGD Optimizer parameters used to change the weights.
Parameters
----------
lr : float [0->1]
Learning rate for SGD
momentum : float [0->1]
Momentum for SGD
decay : float[0->1]
Weight decay for SGD
"""
self.optimizer = SGD(lr=lr,momentum=momentum,decay=decay)
def setWeightInitializer(self,weights):
"""Set the weight initializer to use for model initialization.
Parameters
----------
weights: String
Weight initializer to use to intialize model with
"""
self.init_w = weights
def setGenerators(self):
"""Create the training and validation data generators.
Uses the batch_size, classMap, and regression parameters to
create generators that will generate the appropriate data.
"""
shape = (self.input_shape[0],self.input_shape[1])
self.trainGen,self.validateGen = getBatchGenerators(self.batch_size,
self.dataPath,
shape,
self.classMap,
self.regression)
def changeModelSavePath(self,path):
"""Change the path that the model is saved to.
Parameters
----------
path : String
Path to save the model to
"""
self.modelPath = path
def changeDropout(self,dropout):
"""Change the dropout for the model.
Parameters
----------
dropout: float [0->1]
Proportion of nodes to randomly drop each batch update.
"""
self.dropout = dropout
def changeResultsSavePath(self,path):
"""Change where the logger results are saved to.
Parameters
----------
path : String
Path to save the logger results to
"""
self.resultsPath = path
def changeDataPath(self,path):
"""Change the directory to look for the data in.
Parameters
----------
path : String
Base directory that the data is located at
"""
self.dataPath = path
def changeInputShape(self,shape):
"""Change the Input shape that the model should use.
Parameters
----------
shape : tuple
Input shape for the model
"""
self.input_shape = shape
def changeLossFunction(self,loss):
"""Change the Loss Function that changes the model weights.
Parameters
----------
loss : int
The loss function to evaluate the model with
"""
self.loss_function = loss
def changeBatchLogInterval(self,interval):
"""Change the interval that the batches are logged at.
Parameters
----------
interval : int
Interval that batches will be logged at
"""
self.batch_log_interval = interval
def changeConvolutionalDepth(self,depth):
"""Change the depth of the initial convolutional layers that
are used in the model.
Parameters
-----------
depth : int
Depth of the first convolutional layer
"""
self.conv_depth = depth
def changeMetrics(self, metrics):
"""Changes the metrics that will be used to evauluate the
model.
Parameters
----------
metrics : list
List of metrics that will be used to evaluate the model
"""
        if not isinstance(metrics, list):
metrics = [metrics]
self.metrics = metrics
whatMetrics = []
for i in metrics:
if i == RMSE:
whatMetrics.append("RMSE")
elif i == f1Score:
whatMetrics.append("f1Score")
elif i == recall:
whatMetrics.append("recall")
elif i == precision:
whatMetrics.append("precision")
elif i == mean_squared_error:
whatMetrics.append("mean_squared_error")
elif i == mean_absolute_error:
whatMetrics.append("mean_absolute_error")
elif i == mean_absolute_percentage_error:
whatMetrics.append("mean_absolute_percentage_error")
elif isinstance(i,str):
whatMetrics.append(i)
else:
print("I don't know what to do with : " + str(i))
self.metricsAsString = whatMetrics
def changeBatchSize(self,batch_size):
"""Changes the batch size of the batches that the model will
be trained on.
Parameters
----------
batch_size : int
Number of sets of images to train on before updating the
weights.
"""
self.batch_size = batch_size
self.steps_per_epoch = round(self.train_size/self.batch_size)
self.validation_steps = round(self.validate_size/self.batch_size)
def changeEpochs(self,epochs):
""" Changes the number of epochs that the model will train for.
Parameters
----------
epochs : int
Number of times the model will see all of the data
"""
self.epochs = epochs
def printParameters(self):
"""Prints the model parameters."""
print("----------Model Parameters----------")
print("Initial Conv. Depth : " + str(self.conv_depth))
print("Number of Classes : " + str(self.n_classes))
print("Dropout : " + str(self.dropout))
print("Activation Function : Relu")
print("Input Shape : " + str(self.input_shape))
print("Batch Size : " + str(self.batch_size))
print("--------Optimizer Parameters--------")
print("Learning Rate : " + str(self.optimizer.lr))
print("Momentum : " + str(self.optimizer.momentum))
print("Initial Decay : " + str(self.optimizer.initial_decay))
def baseUNet(input_shape,conv_depth,n_classes,init_w,dropout):
"""Creates a basic U-net segmentation model.
Parameters
----------
input_shape : tuple
Size of the input that the model should accept
conv_depth : int
Depth of the first convolutional layer
n_classes : int
Number of classes that the model should predict
init_w : String
Weight initializer to use for the nodes
dropout : float [0->1]
Proportion of the middle convolutional layer to randomly ignore
each training iteration
Returns
-------
outputs : keras functional model
output layer to compile the model
inputs : keras layer
input layer to compile the model
"""
inputs = Input(input_shape)
c1=Conv2D(conv_depth,
(3,3),
activation='relu',
padding='same',
kernel_initializer=init_w)(inputs)
c1=Conv2D(conv_depth,
(3,3),
activation='relu',
padding="same",
kernel_initializer=init_w)(c1)
# pool down to next layer
pool1 = MaxPooling2D((2,2),strides = (2,2))(c1)
conv_depth *= 2
# convolute down again
conv2 = Conv2D(conv_depth,
activation = 'relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(pool1)
conv2 = Conv2D(conv_depth,
activation = 'relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(conv2)
# pool down again
pool2 = MaxPooling2D((2,2),strides = (2,2))(conv2)
conv_depth *= 2
# Convolution
conv3 = Conv2D(conv_depth,
activation = 'relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(pool2)
conv3 = Conv2D(conv_depth,
activation = 'relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(conv3)
# pool down
pool3 = MaxPooling2D((2,2),strides = (2,2))(conv3)
conv_depth *= 2
# Convolution
conv4 = Conv2D(conv_depth,
activation = 'relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(pool3)
conv4 = Conv2D(conv_depth,
activation = 'relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(conv4)
# pool down
pool4 = MaxPooling2D((2,2),strides = (2,2))(conv4)
conv_depth *=2
# Convolution
conv5 = Conv2D(conv_depth,
activation = 'relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(pool4)
conv5 = Conv2D(conv_depth,
activation = 'relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(conv5)
drop = Dropout(dropout)(conv5)
conv_depth /= 2
conv_depth = int(conv_depth)
# do upsampling
up1 = UpSampling2D(size = (2,2))(drop)
conv6 = Conv2D(conv_depth,
activation = 'relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(up1)
# add in skip info
cat1 = concatenate([conv4,conv6],axis = 3)
conv6 = Conv2D(conv_depth,
activation = 'relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(cat1)
conv6 = Conv2D(conv_depth,
activation = 'relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(conv6)
conv_depth /= 2
conv_depth = int(conv_depth)
# do upsampling
up2 = UpSampling2D(size = (2,2))(conv6)
conv7 = Conv2D(conv_depth,
activation = 'relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(up2)
# add in skip info
cat2 = concatenate([conv3,conv7],axis = 3)
conv7 = Conv2D(conv_depth,
activation = 'relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(cat2)
conv7 = Conv2D(conv_depth,
activation = 'relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(conv7)
conv_depth /= 2
conv_depth = int(conv_depth)
# do upsampling
up3 = UpSampling2D(size = (2,2))(conv7)
conv8 = Conv2D(conv_depth,
activation ='relu',
kernel_size=(3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(up3)
# add in skip info
cat3 = concatenate([conv2,conv8],axis = 3)
conv8 = Conv2D(conv_depth,
activation ='relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(cat3)
conv8 = Conv2D(conv_depth,
activation ='relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(conv8)
conv_depth /= 2
conv_depth = int(conv_depth)
# do upsampling
up4 = UpSampling2D(size = (2,2))(conv8)
conv9 = Conv2D(conv_depth,
activation ='relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(up4)
# add in skip info
cat4 = concatenate([c1,conv9],axis = 3)
conv9 = Conv2D(conv_depth,
activation ='relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(cat4)
conv9 = Conv2D(conv_depth,
activation ='relu',
kernel_size = (3,3),
strides = (1,1),
padding = "same",
kernel_initializer=init_w)(conv9)
outputs = Conv2D(n_classes, 1, activation = 'softmax')(conv9)
return outputs,inputs
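# Minimal usage sketch (assumptions: the enclosing builder above is exposed as e.g.
# `build_unet`, and `Model`/`Adam` come from keras; the name and exact signature are
# illustrative, inferred from the docstring rather than confirmed source):
#   outputs, inputs = build_unet((256, 256, 3), 64, n_classes=2, init_w='he_normal', dropout=0.5)
#   model = Model(inputs=inputs, outputs=outputs)
#   model.compile(optimizer=Adam(1e-4), loss='categorical_crossentropy', metrics=['accuracy'])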
# functions for determining the regression output
def getPropOfGround(x):
"""Finds and returns the proportion of the ground for each class."""
return K.sum(K.sum(x,axis = 1),axis = 1)/65536
``` |
{
"source": "JKhakpour/dateparser",
"score": 2
} |
#### File: dateparser/dateparser/date.py
```python
import calendar
import collections
from datetime import datetime, timedelta
from warnings import warn
import six
import regex as re
from dateutil.relativedelta import relativedelta
from dateparser.date_parser import date_parser
from dateparser.freshness_date_parser import freshness_date_parser
from dateparser.languages.loader import LanguageDataLoader
from dateparser.languages.detection import AutoDetectLanguage, ExactLanguages
from dateparser.conf import apply_settings
from dateparser.utils import normalize_unicode, apply_timezone_from_settings
APOSTROPHE_LOOK_ALIKE_CHARS = [
u'\N{RIGHT SINGLE QUOTATION MARK}', # u'\u2019'
u'\N{MODIFIER LETTER APOSTROPHE}', # u'\u02bc'
u'\N{MODIFIER LETTER TURNED COMMA}', # u'\u02bb'
u'\N{ARMENIAN APOSTROPHE}', # u'\u055a'
u'\N{LATIN SMALL LETTER SALTILLO}', # u'\ua78c'
u'\N{PRIME}', # u'\u2032'
u'\N{REVERSED PRIME}', # u'\u2035'
u'\N{MODIFIER LETTER PRIME}', # u'\u02b9'
u'\N{FULLWIDTH APOSTROPHE}', # u'\uff07'
]
RE_NBSP = re.compile(u'\xa0', flags=re.UNICODE)
RE_SPACES = re.compile(r'\s+')
RE_TRIM_SPACES = re.compile(r'^\s+(\S.*?)\s+$')
RE_SANITIZE_SKIP = re.compile(r'\t|\n|\r|\u00bb|,\s\u0432|\u200e|\xb7|\u200f|\u064e|\u064f', flags=re.M)
RE_SANITIZE_RUSSIAN = re.compile(r'([\W\d])\u0433\.', flags=re.I | re.U)
RE_SANITIZE_AMPM = re.compile(r'\b([ap])(\.)?m(\.)?\b', flags=re.DOTALL | re.I)
RE_SANITIZE_ON = re.compile(r'^.*?on:\s+(.*)')
RE_SANITIZE_APOSTROPHE = re.compile(u'|'.join(APOSTROPHE_LOOK_ALIKE_CHARS))
RE_SEARCH_TIMESTAMP = re.compile(r'^\d{10}(?![^\d.])')
def sanitize_spaces(html_string):
html_string = RE_NBSP.sub(' ', html_string)
html_string = RE_SPACES.sub(' ', html_string)
html_string = RE_TRIM_SPACES.sub(r'\1', html_string)
return html_string
def date_range(begin, end, **kwargs):
dateutil_error_prone_args = ['year', 'month', 'week', 'day', 'hour',
'minute', 'second']
for arg in dateutil_error_prone_args:
if arg in kwargs:
raise ValueError("Invalid argument: %s" % arg)
step = relativedelta(**kwargs) if kwargs else relativedelta(days=1)
date = begin
while date < end:
yield date
date += step
# handles edge-case when iterating months and last interval is < 30 days
if kwargs.get('months', 0) > 0 and (date.year, date.month) == (end.year, end.month):
yield end
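# Illustrative example of date_range (relativedelta keywords use plural forms):
#   from datetime import datetime
#   list(date_range(datetime(2015, 1, 1), datetime(2015, 1, 22), weeks=1))
#   -> [datetime(2015, 1, 1, 0, 0), datetime(2015, 1, 8, 0, 0), datetime(2015, 1, 15, 0, 0)]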
def get_intersecting_periods(low, high, period='day'):
if period not in ['year', 'month', 'week', 'day', 'hour', 'minute', 'second', 'microsecond']:
raise ValueError("Invalid period: {}".format(period))
if high <= low:
return
step = relativedelta(**{period + 's': 1})
current_period_start = low
if isinstance(current_period_start, datetime):
reset_arguments = {}
for test_period in ['microsecond', 'second', 'minute', 'hour']:
if test_period == period:
break
else:
reset_arguments[test_period] = 0
current_period_start = current_period_start.replace(**reset_arguments)
if period == 'week':
current_period_start \
= current_period_start - timedelta(days=current_period_start.weekday())
elif period == 'month':
current_period_start = current_period_start.replace(day=1)
elif period == 'year':
current_period_start = current_period_start.replace(month=1, day=1)
while current_period_start < high:
yield current_period_start
current_period_start += step
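# Illustrative example of get_intersecting_periods: month starts overlapping an interval.
#   list(get_intersecting_periods(datetime(2015, 1, 20), datetime(2015, 3, 5), period='month'))
#   -> [datetime(2015, 1, 1, 0, 0), datetime(2015, 2, 1, 0, 0), datetime(2015, 3, 1, 0, 0)]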
def sanitize_date(date_string):
date_string = RE_SANITIZE_SKIP.sub(' ', date_string)
date_string = RE_SANITIZE_RUSSIAN.sub(r'\1 ', date_string) # remove u'г.' (Russian for year) but not in words
date_string = sanitize_spaces(date_string)
date_string = RE_SANITIZE_AMPM.sub(r'\1m', date_string)
date_string = RE_SANITIZE_ON.sub(r'\1', date_string)
date_string = RE_SANITIZE_APOSTROPHE.sub(u"'", date_string)
return date_string
def get_date_from_timestamp(date_string, settings):
if RE_SEARCH_TIMESTAMP.search(date_string):
date_obj = datetime.fromtimestamp(int(date_string[:10]))
date_obj = apply_timezone_from_settings(date_obj, settings)
return date_obj
def get_last_day_of_month(year, month):
return calendar.monthrange(year, month)[1]
def parse_with_formats(date_string, date_formats, settings):
""" Parse with formats and return a dictionary with 'period' and 'obj_date'.
:returns: :class:`datetime.datetime`, dict or None
"""
period = 'day'
for date_format in date_formats:
try:
date_obj = datetime.strptime(date_string, date_format)
except ValueError:
continue
else:
# If format does not include the day, use last day of the month
# instead of first, because the first is usually out of range.
if '%d' not in date_format:
period = 'month'
date_obj = date_obj.replace(
day=get_last_day_of_month(date_obj.year, date_obj.month))
if not ('%y' in date_format or '%Y' in date_format):
today = datetime.today()
date_obj = date_obj.replace(year=today.year)
date_obj = apply_timezone_from_settings(date_obj, settings)
return {'date_obj': date_obj, 'period': period}
else:
return {'date_obj': None, 'period': period}
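# Illustrative example of parse_with_formats (assumes a dateparser settings object with
# no timezone conversion configured):
#   parse_with_formats('25-03-2016', ['%d-%m-%Y'], settings)
#   -> {'date_obj': datetime.datetime(2016, 3, 25, 0, 0), 'period': 'day'}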
class _DateLanguageParser(object):
DATE_FORMATS_ERROR_MESSAGE = "Date formats should be list, tuple or set of strings"
def __init__(self, language, date_string, date_formats, settings=None):
self._settings = settings
if isinstance(date_formats, six.string_types):
warn(self.DATE_FORMATS_ERROR_MESSAGE, FutureWarning)
date_formats = [date_formats]
elif not (date_formats is None or isinstance(date_formats, (list, tuple, collections.Set))):
raise TypeError(self.DATE_FORMATS_ERROR_MESSAGE)
self.language = language
self.date_string = date_string
self.date_formats = date_formats
self._translated_date = None
self._translated_date_with_formatting = None
@classmethod
def parse(cls, language, date_string, date_formats=None, settings=None):
instance = cls(language, date_string, date_formats, settings)
return instance._parse()
def _parse(self):
for parser in (
self._try_timestamp,
self._try_freshness_parser,
self._try_given_formats,
self._try_parser,
self._try_hardcoded_formats,
):
date_obj = parser()
if self._is_valid_date_obj(date_obj):
return date_obj
else:
return None
def _try_timestamp(self):
return {
'date_obj': get_date_from_timestamp(self.date_string, self._settings),
'period': 'day',
}
def _try_freshness_parser(self):
return freshness_date_parser.get_date_data(self._get_translated_date(), self._settings)
def _try_parser(self):
_order = self._settings.DATE_ORDER
try:
if self._settings.PREFER_LANGUAGE_DATE_ORDER:
self._settings.DATE_ORDER = self.language.info.get('dateorder', _order)
date_obj, period = date_parser.parse(
self._get_translated_date(), settings=self._settings)
self._settings.DATE_ORDER = _order
return {
'date_obj': date_obj,
'period': period,
}
except ValueError:
self._settings.DATE_ORDER = _order
return None
def _try_given_formats(self):
if not self.date_formats:
return
return parse_with_formats(
self._get_translated_date_with_formatting(),
self.date_formats, settings=self._settings
)
def _try_hardcoded_formats(self):
hardcoded_date_formats = [
'%B %d, %Y, %I:%M:%S %p',
'%b %d, %Y at %I:%M %p',
'%d %B %Y %H:%M:%S',
'%A, %B %d, %Y',
'%Y-%m-%dT%H:%M:%S.%fZ'
]
try:
return parse_with_formats(
self._get_translated_date_with_formatting(),
hardcoded_date_formats,
settings=self._settings
)
except TypeError:
return None
def _get_translated_date(self):
if self._translated_date is None:
self._translated_date = self.language.translate(
self.date_string, keep_formatting=False, settings=self._settings)
return self._translated_date
def _get_translated_date_with_formatting(self):
if self._translated_date_with_formatting is None:
self._translated_date_with_formatting = self.language.translate(
self.date_string, keep_formatting=True, settings=self._settings)
return self._translated_date_with_formatting
def _is_valid_date_obj(self, date_obj):
if not isinstance(date_obj, dict):
return False
if len(date_obj) != 2:
return False
if 'date_obj' not in date_obj or 'period' not in date_obj:
return False
if not date_obj['date_obj']:
return False
if date_obj['period'] not in ('day', 'week', 'month', 'year'):
return False
return True
class DateDataParser(object):
"""
Class which handles language detection, translation and subsequent generic parsing of
string representing date and/or time.
:param languages:
    A list of two-letter language codes, e.g. ['en', 'es'].
If languages are given, it will not attempt to detect the language.
:type languages: list
:param allow_redetect_language:
Enables/disables language re-detection.
:type allow_redetect_language: bool
:param settings:
Configure customized behavior using settings defined in :mod:`dateparser.conf.Settings`.
:type settings: dict
:return: A parser instance
:raises:
ValueError - Unknown Language, TypeError - Languages argument must be a list
"""
language_loader = None
@apply_settings
def __init__(self, languages=None, allow_redetect_language=False, settings=None):
self._settings = settings
available_language_map = self._get_language_loader().get_language_map()
if isinstance(languages, (list, tuple, collections.Set)):
if all([language in available_language_map for language in languages]):
languages = [available_language_map[language] for language in languages]
else:
unsupported_languages = set(languages) - set(available_language_map.keys())
raise ValueError(
"Unknown language(s): %s" % ', '.join(map(repr, unsupported_languages)))
elif languages is not None:
raise TypeError("languages argument must be a list (%r given)" % type(languages))
if allow_redetect_language:
self.language_detector = AutoDetectLanguage(
languages if languages else list(available_language_map.values()),
allow_redetection=True)
elif languages:
self.language_detector = ExactLanguages(languages=languages)
else:
self.language_detector = AutoDetectLanguage(
list(available_language_map.values()), allow_redetection=False)
def get_date_data(self, date_string, date_formats=None):
"""
Parse string representing date and/or time in recognizable localized formats.
Supports parsing multiple languages and timezones.
:param date_string:
A string representing date and/or time in a recognizably valid format.
:type date_string: str|unicode
:param date_formats:
A list of format strings using directives as given
`here <https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior>`_.
The parser applies formats one by one, taking into account the detected languages.
:type date_formats: list
:return: a dict mapping keys to :mod:`datetime.datetime` object and *period*. For example:
{'date_obj': datetime.datetime(2015, 6, 1, 0, 0), 'period': u'day'}
:raises: ValueError - Unknown Language
.. note:: *Period* values can be a 'day' (default), 'week', 'month', 'year'.
*Period* represents the granularity of date parsed from the given string.
In the example below, since no day information is present, the day is assumed to be current
day ``16`` from *current date* (which is June 16, 2015, at the moment of writing this).
Hence, the level of precision is ``month``:
>>> DateDataParser().get_date_data(u'March 2015')
{'date_obj': datetime.datetime(2015, 3, 16, 0, 0), 'period': u'month'}
Similarly, for date strings with no day and month information present, level of precision
is ``year`` and day ``16`` and month ``6`` are from *current_date*.
>>> DateDataParser().get_date_data(u'2014')
{'date_obj': datetime.datetime(2014, 6, 16, 0, 0), 'period': u'year'}
Dates with time zone indications or UTC offsets are returned in UTC time unless
specified using `Settings`_.
>>> DateDataParser().get_date_data(u'23 March 2000, 1:21 PM CET')
{'date_obj': datetime.datetime(2000, 3, 23, 14, 21), 'period': 'day'}
"""
if not(isinstance(date_string, six.text_type) or isinstance(date_string, six.string_types)):
raise TypeError('Input type must be str or unicode')
res = parse_with_formats(date_string, date_formats or [], self._settings)
if res['date_obj']:
return res
if self._settings.NORMALIZE:
date_string = normalize_unicode(date_string)
date_string = sanitize_date(date_string)
for language in self.language_detector.iterate_applicable_languages(
date_string, modify=True, settings=self._settings):
parsed_date = _DateLanguageParser.parse(
language, date_string, date_formats, settings=self._settings)
if parsed_date:
parsed_date['language'] = language.shortname
return parsed_date
else:
return {'date_obj': None, 'period': 'day', 'language': None}
def get_date_tuple(self, *args, **kwargs):
date_tuple = collections.namedtuple('DateData', 'date_obj period language')
date_data = self.get_date_data(*args, **kwargs)
return date_tuple(**date_data)
@classmethod
def _get_language_loader(cls):
if not cls.language_loader:
cls.language_loader = LanguageDataLoader()
return cls.language_loader
``` |
{
"source": "JKhan01/SM446_TeamXYZ",
"score": 3
} |
#### File: SM446_TeamXYZ/chatBotStable/actions.py
```python
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import FormAction
from modules.Bitbucket import bitbucketActions
from modules.ErrorSearch import searchStack
import json
from functions import *
from jira_package import *
from g5 import *
from g6 import *
obj = bitbucketActions()
class CommitByUserForm(FormAction):
def name(self) -> Text:
return "commit_by_user_form"
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
# if (tracker.get_slot("bitbucket_action")):
# if ("watchers" in tracker.get_slot("bitbucket_action") or "list of watchers" in tracker.get_slot("bitbucket_action")):
# return ["bitbucket_action","repo_name","owner_name"]
# if (tracker.get_slot("search_keys")):
# if ("who" or "who all" in tracker.get_slot("search_keys")):
# return ["bitbucket_action","repo_name","owner_name"]
return ["repo_name","owner_name","user_name"]
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
returnAnswer = obj.get_commit_by_user(tracker.get_slot('repo_name'),
tracker.get_slot('owner_name'), tracker.get_slot('user_name'))
returnAnswer['type'] = 'bitbucket'
txt = json.dumps(returnAnswer)
dispatcher.utter_message(text=txt)
return []
class CommitByBranchForm(FormAction):
def name(self) -> Text:
return "commit_by_branch_form"
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
# if (tracker.get_slot("bitbucket_action")):
# if ("watchers" in tracker.get_slot("bitbucket_action") or "list of watchers" in tracker.get_slot("bitbucket_action")):
# return ["bitbucket_action","repo_name","owner_name"]
# if (tracker.get_slot("search_keys")):
# if ("who" or "who all" in tracker.get_slot("search_keys")):
# return ["bitbucket_action","repo_name","owner_name"]
return ["repo_name","owner_name","branch_name"]
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
returnAnswer = obj.get_commit_by_branch(tracker.get_slot('repo_name'),
tracker.get_slot('owner_name'), tracker.get_slot('branch_name'))
returnAnswer['type'] = 'bitbucket'
txt = json.dumps(returnAnswer)
dispatcher.utter_message(text=txt)
return []
class CommitMsgForm(FormAction):
def name(self) -> Text:
return "commit_msg_form"
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
# if (tracker.get_slot("bitbucket_action")):
# if ("watchers" in tracker.get_slot("bitbucket_action") or "list of watchers" in tracker.get_slot("bitbucket_action")):
# return ["bitbucket_action","repo_name","owner_name"]
# if (tracker.get_slot("search_keys")):
# if ("who" or "who all" in tracker.get_slot("search_keys")):
# return ["bitbucket_action","repo_name","owner_name"]
return ["repo_name","owner_name","message"]
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
returnAnswer = obj.get_commit_by_msg(tracker.get_slot('repo_name'),
tracker.get_slot('owner_name'), tracker.get_slot('message'))
returnAnswer['type'] = 'bitbucket'
txt = json.dumps(returnAnswer)
dispatcher.utter_message(text=txt)
return []
class WatcherListForm(FormAction):
def name(self) -> Text:
return "watcher_list_form"
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ["repo_name","owner_name"]
def submit(self,dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
returnAnswer = obj.get_watchers(tracker.get_slot('repo_name'),tracker.get_slot('owner_name'))
returnAnswer['type'] = 'bitbucket'
txt = json.dumps(returnAnswer)
dispatcher.utter_message(text=txt)
return []
class ErrorSearchForm(FormAction):
    def __init__(self):
        super().__init__()
        self.error_query = ""
def name(self) -> Text:
return "error_search_form"
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ["error_query"]
def submit(self,dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
obj = searchStack()
returnVar = {}
returnVar['reply'] = obj.searchStack(tracker.get_slot("error_query"))
returnVar['status'] = 200
returnVar['type'] = 'stackoverflow'
returnVar = json.dumps(returnVar)
dispatcher.utter_message(text=returnVar)
return []
class BranchListForm(FormAction):
def name(self):
return "branch_list_form"
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ["repo_name","owner_name"]
def submit(self,dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
returnAnswer = obj.get_branches(tracker.get_slot('repo_name'),tracker.get_slot('owner_name'))
returnAnswer['type'] = 'bitbucket'
txt = json.dumps(returnAnswer)
dispatcher.utter_message(text=txt)
return []
class RepoListForm(FormAction):
def name(self):
return "repo_list_form"
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ["owner_name"]
def submit(self,dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
print (f"Target Repo: {tracker.get_slot('owner_name')}")
returnAnswer = obj.get_repos(tracker.get_slot('owner_name'))
returnAnswer['type'] = 'bitbucket'
txt = json.dumps(returnAnswer)
dispatcher.utter_message(text=txt)
return []
# Information about all the spaces
class InfoAllSpaces(Action):
def name(self) -> Text:
return "action_info_of_all_spaces"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
t = get_all_spaces()
tx = json.dumps(t, indent=4)
txt = json.loads(tx)
dispatcher.utter_message(text=txt)
return []
# Create a new space
class CreateSpace(FormAction):
def name(self) -> Text:
return "create_space_form"
def slot_mappings(self):
# type: () -> Dict[Text: Union[Dict, List[Dict]]]
return {"space": [self.from_entity(entity="space"),
self.from_text()],
"key": [self.from_entity(entity="key"),
self.from_text()]}
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
""" The required entries for this function """
print("required_slots(tracker : Tracker)")
return ["key", "space"]
    def submit(self, dispatcher: CollectingDispatcher,
               tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        # Create the space directly from the collected slots; the previous
        # nested `run` wrapper added nothing and has been removed.
        key = str(tracker.get_slot('key'))
        name = str(tracker.get_slot('space'))
        create_space(key, name)
        dispatcher.utter_message(text="Space Created")
        return []
# Info of a specific space
class InfoSpace(Action):
def name(self) -> Text:
return "action_space_info"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
a = str(tracker.get_slot("key"))
t = get_info_space(a)
tx = json.dumps(t, indent = 2)
txt = json.loads(tx)
dispatcher.utter_message(text=txt)
return []
# Get pages in a space
class GetPagesInSpace(Action):
def name(self) -> Text:
return "action_get_pages_in_a_space"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
a = str(tracker.get_slot("space"))
t = get_pages_in_a_space(a)
tx = json.dumps(t, indent=4)
txt = json.loads(tx)
dispatcher.utter_message(text=txt)
return []
# Create a new page
class CreatePage(FormAction):
def name(self) -> Text:
return "create_page_form"
def slot_mappings(self):
# type: () -> Dict[Text: Union[Dict, List[Dict]]]
return {"space": [self.from_entity(entity="space"),
self.from_text()],
"title": [self.from_entity(entity="title"),
self.from_text()],
"body": [self.from_entity(entity="body", intent="body_entry"),
self.from_text()]}
# def validate_body(
# self, value:Text,
# dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
""" The required entries for this function """
print("required_slots(tracker : Tracker)")
return ["space", "title", "body"]
    def submit(self, dispatcher: CollectingDispatcher,
               tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        # Create the page directly from the collected slots; the previous
        # nested `run` wrapper added nothing and has been removed.
        space = str(tracker.get_slot('space'))
        title = str(tracker.get_slot('title'))
        body = str(tracker.get_slot('body'))
        create_page(space, title, body)
        dispatcher.utter_message(text="Page Created")
        return []
# Delete a Page
class DeletePage(Action):
def name(self) -> Text:
return "action_delete_page"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
a = int(str(tracker.get_slot("page_id")))
delete_page(a)
dispatcher.utter_message(text="Page Deleted")
return []
# Get Page info using id
class GetPageInfoById(Action):
def name(self) -> Text:
return "action_get_page_info_by_id"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
a = int(str(tracker.get_slot("page_id")))
t = page_info_by_id(a)
tx = json.dumps(t, indent = 2)
txt = json.loads(tx)
dispatcher.utter_message(text=txt)
return []
# Export Page as PDF
class ExportPageAsPdf(FormAction):
def name(self) -> Text:
return "export_page_as_pdf_form"
def slot_mappings(self):
# type: () -> Dict[Text: Union[Dict, List[Dict]]]
return {"page_id": [self.from_entity(entity="page_id"),
self.from_text()],
"file_name": [self.from_entity(entity="file_name"),
self.from_text()]}
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
""" The required entries for this function """
print("required_slots(tracker : Tracker)")
return ["page_id", "file_name"]
    def submit(self, dispatcher: CollectingDispatcher,
               tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        # Export the page directly from the collected slots; the previous
        # nested `run` wrapper added nothing and has been removed.
        page_id = str(tracker.get_slot('page_id'))
        file_name = str(tracker.get_slot('file_name'))
        export_page_as_pdf(page_id, file_name)
        dispatcher.utter_message(text="Page Exported")
        return []
class GetUserAllProject(FormAction):
def name(self) -> Text:
return "get_all_project_name_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return []
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_all_project_name()
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetUserInGroup(FormAction):
def name(self) -> Text:
return "get_user_in_group_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['group_name']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_users_in_group(tracker.get_slot('group_name'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetIssueProject(FormAction):
def name(self) -> Text:
return "get_issue_in_project_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['project_name']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_issues_in_project(tracker.get_slot('project_name'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetIssue(FormAction):
def name(self) -> Text:
return "get_issue_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['issue_summary']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_issue(tracker.get_slot('issue_summary'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetEpic(FormAction):
def name(self) -> Text:
return "get_epic_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['epic_summary']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_issue(tracker.get_slot('epic_summary'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetTask(FormAction):
def name(self) -> Text:
return "get_task_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['task_summary']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_issue(tracker.get_slot('task_summary'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetStatusOfIssue(FormAction):
def name(self) -> Text:
return "get_status_of_issue_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['issue_summary']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_status_of_issue(tracker.get_slot('issue_summary'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetStatusOfEpic(FormAction):
def name(self) -> Text:
return "get_status_of_epic_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['epic_summary']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_status_of_issue(tracker.get_slot('epic_summary'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetStatusOfTask(FormAction):
def name(self) -> Text:
return "get_status_of_task_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['task_summary']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_status_of_issue(tracker.get_slot('task_summary'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetIssueVersion(FormAction):
def name(self) -> Text:
return "get_issue_version_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['issue_summary']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_issue_version(tracker.get_slot('issue_summary'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetEpicVersion(FormAction):
def name(self) -> Text:
return "get_epic_version_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['epic_summary']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_issue_version(tracker.get_slot('epic_summary'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetTaskVersion(FormAction):
def name(self) -> Text:
return "get_task_version_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['task_summary']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_issue_version(tracker.get_slot('task_summary'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetCommentIssue(FormAction):
def name(self) -> Text:
return "get_comment_issue_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['issue_summary']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_comments_in_issue(tracker.get_slot('issue_summary'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetCommentEpic(FormAction):
def name(self) -> Text:
return "get_comment_epic_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['epic_summary']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_comments_in_issue(tracker.get_slot('epic_summary'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetCommentTask(FormAction):
def name(self) -> Text:
return "get_comment_task_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['task_summary']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_comments_in_issue(tracker.get_slot('task_summary'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetWorklogIssue(FormAction):
def name(self) -> Text:
return "get_worklog_issue_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['issue_summary']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_worklog_in_issue(tracker.get_slot('issue_summary'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetWorklogTask(FormAction):
def name(self) -> Text:
return "get_worklog_task_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['task_summary']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_worklog_in_issue(tracker.get_slot('task_summary'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetWorklogEpic(FormAction):
def name(self) -> Text:
return "get_worklog_epic_form"
## return the same form name
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ['epic_summary']
def submit(self, dispatcher: CollectingDispatcher,tracker: Tracker,domain: Dict[Text, Any]) -> List[Dict]:
dispatcher.utter_message(text="Parameters Submitted")
ret_data = get_worklog_in_issue(tracker.get_slot('epic_summary'))
txt = json.dumps(ret_data)
dispatcher.utter_message(text=txt)
return []
class GetLatestInboxEmail(Action):
def name(self) -> Text:
return "action_get_latest_email_in_inbox"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
op = int(tracker.latest_message.get('text'))
t = LatestMailInInbox(op)
# tx = json.dumps(t, indent = 4)
# txt = json.loads(tx)
# txtt = json.dumps(txt, indent = 2)
dispatcher.utter_message(text=t)
return []
class GetLatestUserEmail(Action):
def name(self) -> Text:
return "action_get_latest_email_from_user"
# @staticmethod
# def required_slots(tracker: Tracker) -> List[Text]:
# """ The required entries for this function """
# print("required_slots(tracker : Tracker)")
# return ["query"]
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
q = str(tracker.get_slot("query"))
op = int(tracker.latest_message.get('text'))
t = GetLatestMailFromUser(q, op)
#tx = json.dumps(t, indent = 4)
# txt = json.loads(tx)
# txtt = json.dumps(txt, indent = 2)
dispatcher.utter_message(text=t)
return []
class GetLatestLabelEmail(Action):
def name(self) -> Text:
return "action_get_latest_email_from_label"
# @staticmethod
# def required_slots(tracker: Tracker) -> List[Text]:
# """ The required entries for this function """
# print("required_slots(tracker : Tracker)")
# return ["query"]
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
q = str(tracker.get_slot("query"))
op = int(tracker.latest_message.get('text'))
t = GetLatestMailFromLabel(q, op)
#tx = json.dumps(t, indent = 4)
# txt = json.loads(tx)
# txtt = json.dumps(txt, indent = 2)
dispatcher.utter_message(text=t)
return []
class SendEmail(FormAction):
def name(self) -> Text:
return "send_email_form"
def slot_mappings(self):
# type: () -> Dict[Text: Union[Dict, List[Dict]]]
return {"email_body": [self.from_entity(entity="email_body"),
self.from_text()],
"receiver": [self.from_entity(entity="receiver"),
self.from_text()],
"subject": [self.from_entity(entity="subject"),
self.from_text()]}
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
""" The required entries for this function """
print("required_slots(tracker : Tracker)")
return ["receiver", "subject", "email_body"]
    def submit(self, dispatcher: CollectingDispatcher,
               tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        # Send the email directly from the collected slots; the previous
        # nested `run` wrapper added nothing and has been removed.
        email_body = str(tracker.get_slot("email_body"))
        receiver = str(tracker.get_slot("receiver"))
        subject = str(tracker.get_slot("subject"))
        SendMail(email_body, receiver, subject)
        dispatcher.utter_message(text="Email Sent")
        return []
class SendEmailWithAttachments(FormAction):
def name(self) -> Text:
return "send_email_with_attachments_form"
def slot_mappings(self):
# type: () -> Dict[Text: Union[Dict, List[Dict]]]
return {"email_body": [self.from_entity(entity="email_body"),
self.from_text()],
"receiver": [self.from_entity(entity="receiver"),
self.from_text()],
"subject": [self.from_entity(entity="subject"),
self.from_text()],
"file_dir": [self.from_entity(entity="file_dir"),
self.from_text()],
"filename": [self.from_entity(entity="filename"),
self.from_text()]}
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
""" The required entries for this function """
print("required_slots(tracker : Tracker)")
return ["receiver", "subject", "email_body", "file_dir", "filename"]
    def submit(self, dispatcher: CollectingDispatcher,
               tracker: Tracker,
               domain: Dict[Text, Any]) -> List[Dict]:
        # Send the email with its attachment directly from the collected slots;
        # the previous nested `run` wrapper added nothing and has been removed.
        email_body = str(tracker.get_slot("email_body"))
        receiver = str(tracker.get_slot("receiver"))
        subject = str(tracker.get_slot("subject"))
        file_dir = str(tracker.get_slot("file_dir"))
        filename = str(tracker.get_slot("filename"))
        SendMailWithAttachments(email_body, receiver, subject, file_dir, filename)
        dispatcher.utter_message(text="Email Sent")
        return []
```
#### File: SM446_TeamXYZ/chatBotStable/functions.py
```python
from atlassian import Confluence
from pprint import pprint
import json
cf = Confluence(
    # url='',       # Atlassian (Confluence) website of the end client
    # username='',  # registered email id of the end client
    # password=''   # access token used to access the client's project pages
)
# Define all the Confluence functions here
# Then reference these functions to the code of assistant after intent identification
# Based on the intent, call a particular function
# Take input, and give appropriate output
# 1. Users
# 2. Spaces
def create_space(key, name):
#key = input(print("Enter the space key:"))
#name = input(print("Enter the name of the space:"))
cf.create_space(key, name)
print("Space Created")
return (json.dumps("Space Created"))
#return (print(json.dumps("Space Created")))
def get_all_spaces():
a = cf.get_all_spaces()
e = []
for i in range(len(a)):
d = {}
d['id'] = a[i]['id']
d['key'] = a[i]['key']
d['name'] = a[i]['name']
d['status'] = a[i]['status']
d['type'] = a[i]['type']
e.append(d)
#return(print(json.dumps(e, indent = 2)))
return(json.dumps(e, indent = 2))
def get_info_space(key):
#key = int(input("Enter the space key:"))
a = cf.get_space(key, expand='description.plain,homepage')
d = {}
d['id'] = a['id']
d['key'] = a['key']
d['name'] = a['name']
d['status'] = a['status']
d['type'] = a['type']
d['homepage_id'] = a['homepage']['id']
d['homepage_title'] = a['homepage']['title']
return(json.dumps(d, indent = 2))
def get_pages_in_a_space(name):
#name = input(print("Enter the name of the space"))
a = cf.get_all_pages_from_space(name, start=0, limit=50, status=None, expand=None, content_type='page')
e = []
for i in range(len(a)):
d = {}
d['id'] = a[i]['id']
d['status'] = a[i]['status']
d['title'] = a[i]['title']
d['type'] = a[i]['type']
e.append(d)
return(json.dumps(e, indent = 2))
# 3. Pages
def create_page(space, title, body):
cf.create_page(space, title, body)
print("Page Created")
#return (json.dumps("Page Created"))
#return (print(json.dumps("Page Created")))
def delete_page(id):
#id = int(input(print("Enter the ID of the page:")))
cf.remove_page(id, status=None, recursive=False)
print("Page Deleted")
#return(print(json.dumps("Page Deleted")))
#return(json.dumps("Page Deleted"))
# def check_page_exists(space, title):
# #space = input(print("Enter the name of the space to be checked:"))
# #title = input(print("Enter the title of the page that has to be checked:"))
# pprint(cf.page_exists(space, title))
# #return (pprint(json.dumps(cf.page_exists(space, title))))
# return (json.dumps(cf.page_exists(space, title)))
# def get_page_id(space, title):
# #space = input(print("Enter the space name:"))
# #title = input(print("Enter the title of the page:"))
# print(cf.get_page_id(space, title))
# return (json.dumps(cf.get_page_id(space, title)))
# #return (print(json.dumps(cf.get_page_id(space, title))))
# def get_page_space(id):
# #id = int(input(print("Enter the page ID:")))
# print(cf.get_page_space(id))
# return (json.dumps(cf.get_page_space(id)))
# # Page Info using title
# def page_info_by_title(space, title):
# a = cf.get_page_by_title(space, title, start=None, limit=None)
# d = {}
# d['id'] = a['id']
# d['status'] = a['status']
# d['title'] = a['title']
# d['type'] = a['type']
# return(json.dumps(d, indent = 2))
# Page info using ID
def page_info_by_id(id):
a = cf.get_page_by_id(id, expand=None, status=None, version=None)
d = {}
d['id'] = a['id']
d['status'] = a['status']
d['title'] = a['title']
d['type'] = a['type']
d['space_name'] = a['space']['name']
d['space_key'] = a['space']['key']
d['space_id'] = a['space']['id']
d['creator_email'] = a['history']['createdBy']['email']
d['creator_displayName'] = a['history']['createdBy']['displayName']
d['created_date'] = a['history']['createdDate']
return(json.dumps(d, indent = 2))
# Export Page as PDF
def export_page_as_pdf(id, name):
#id = int(input(print("Enter the page ID:")))
#name = input(print("Enter the name of the pdf file to be created:"))
a = cf.export_page(id)
def save_file(content):
file_pdf = open(name, 'wb')
file_pdf.write(content)
file_pdf.close()
print("Completed")
save_file(content=a)
#print(a)
print("Page Exported")
return (json.dumps("Page Exported"))
#return (print(json.dumps("Page Exported")))
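# Usage sketch (hypothetical page id and file name): once `cf` is configured with real
# credentials above, a page can be exported to a local PDF like this:
#   export_page_as_pdf(123456, 'meeting_notes.pdf')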
``` |
{
"source": "jkhargharia/python-dlpy",
"score": 2
} |
#### File: dlpy/tests/test_embedding_model.py
```python
import os
import unittest
import swat
import swat.utils.testing as tm
from dlpy import Dense
from dlpy.applications import ResNet18_Caffe, MobileNetV1, Model
from dlpy.embedding_model import EmbeddingModel
from dlpy.image_embedding import ImageEmbeddingTable
from dlpy.lr_scheduler import StepLR
from dlpy.model import AdamSolver, Optimizer
from dlpy.model import Gpu
class TestImageEmbeddingModel(unittest.TestCase):
# Create a class attribute to hold the cas host type
server_type = None
s = None
server_sep = '/'
data_dir = None
@classmethod
def setUpClass(cls):
swat.reset_option()
swat.options.cas.print_messages = False
swat.options.interactive_mode = False
cls.s = swat.CAS()
cls.server_type = tm.get_cas_host_type(cls.s)
cls.server_sep = '\\'
if cls.server_type.startswith("lin") or cls.server_type.startswith("osx"):
cls.server_sep = '/'
if 'DLPY_DATA_DIR' in os.environ:
cls.data_dir = os.environ.get('DLPY_DATA_DIR')
if cls.data_dir.endswith(cls.server_sep):
cls.data_dir = cls.data_dir[:-1]
cls.data_dir += cls.server_sep
if "DLPY_DATA_DIR_LOCAL" in os.environ:
cls.local_dir = os.environ.get("DLPY_DATA_DIR_LOCAL")
# the server path that points to DLPY_DATA_DIR_LOCAL
if "DLPY_DATA_DIR_SERVER" in os.environ:
cls.server_dir = os.environ.get("DLPY_DATA_DIR_SERVER")
if cls.server_dir.endswith(cls.server_sep):
cls.server_dir = cls.server_dir[:-1]
cls.server_dir += cls.server_sep
@classmethod
def tearDownClass(cls):
# tear down tests
try:
cls.s.terminate()
except swat.SWATError:
pass
del cls.s
swat.reset_option()
def test_embedding_model_siamese(self):
if self.server_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR_SERVER is not set in the environment variables")
# test default
resnet18_model = ResNet18_Caffe(self.s,
width=224,
height=224,
random_flip='HV',
random_mutation='random'
)
branch = resnet18_model.to_functional_model(stop_layers=resnet18_model.layers[-1])
model = EmbeddingModel.build_embedding_model(branch)
res = model.print_summary()
# print(res)
self.assertEqual(res[res['Layer'].str.contains(model.embedding_layer_name_prefix)].shape[0], 2)
# test options
embedding_layer = Dense(n=10)
model1 = EmbeddingModel.build_embedding_model(branch, model_table='test',
embedding_model_type='siamese', margin=3.0,
embedding_layer=embedding_layer)
res1 = model1.print_summary()
# print(res1)
self.assertEqual(res1[res1['Layer'].str.contains(model1.embedding_layer_name_prefix)].shape[0], 2)
# test passing in a sequential model
model2 = EmbeddingModel.build_embedding_model(resnet18_model)
res2 = model2.print_summary()
self.assertEqual(res2[res2['Layer'].str.contains(model2.embedding_layer_name_prefix)].shape[0], 2)
model3 = EmbeddingModel.build_embedding_model(resnet18_model, model_table='test2',
embedding_model_type='siamese', margin=3.0,
embedding_layer=embedding_layer)
res3 = model3.print_summary()
self.assertEqual(res3[res3['Layer'].str.contains(model3.embedding_layer_name_prefix)].shape[0], 2)
def test_embedding_model_siamese_1(self):
if self.server_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR_SERVER is not set in the environment variables")
# test passing in a functional model
vgg16 = Model(self.s)
vgg16.load(path=self.data_dir + 'vgg16.sashdat')
model = EmbeddingModel.build_embedding_model(vgg16)
res = model.print_summary()
self.assertEqual(res[res['Layer'].str.contains(model.embedding_layer_name_prefix)].shape[0], 2)
def test_embedding_model_triplet(self):
if self.server_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR_SERVER is not set in the environment variables")
# test triplet
resnet18_model = ResNet18_Caffe(self.s,
width=224,
height=224,
random_flip='HV',
random_mutation='random'
)
branch = resnet18_model.to_functional_model(stop_layers=resnet18_model.layers[-1])
model = EmbeddingModel.build_embedding_model(branch, model_table='test',
embedding_model_type='triplet', margin=-3.0)
res = model.print_summary()
self.assertEqual(res[res['Layer'].str.contains(model.embedding_layer_name_prefix)].shape[0], 3)
# test embedding layer
embedding_layer = Dense(n=10)
model1 = EmbeddingModel.build_embedding_model(branch, model_table='test',
embedding_model_type='triplet', margin=-3.0,
embedding_layer=embedding_layer)
res1 = model1.print_summary()
# print(res1)
self.assertEqual(res1[res1['Layer'].str.contains(model1.embedding_layer_name_prefix)].shape[0], 3)
# test passing in a sequential model
model2 = EmbeddingModel.build_embedding_model(resnet18_model,
embedding_model_type='triplet', margin=-3.0,
embedding_layer=embedding_layer)
res2 = model2.print_summary()
self.assertEqual(res2[res2['Layer'].str.contains(model2.embedding_layer_name_prefix)].shape[0], 3)
def test_embedding_model_triplet_1(self):
if self.server_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR_SERVER is not set in the environment variables")
# test passing in a functional model
vgg16 = Model(self.s)
vgg16.load(path=self.data_dir + 'vgg16.sashdat')
embedding_layer = Dense(n=10)
model = EmbeddingModel.build_embedding_model(vgg16,
embedding_model_type='triplet', margin=-3.0,
embedding_layer=embedding_layer)
res = model.print_summary()
self.assertEqual(res[res['Layer'].str.contains(model.embedding_layer_name_prefix)].shape[0], 3)
def test_embedding_model_quartet(self):
if self.server_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR_SERVER is not set in the environment variables")
# test triplet
resnet18_model = ResNet18_Caffe(self.s,
width=224,
height=224,
random_flip='HV',
random_mutation='random'
)
branch = resnet18_model.to_functional_model(stop_layers=resnet18_model.layers[-1])
model = EmbeddingModel.build_embedding_model(branch, model_table='test',
embedding_model_type='quartet', margin=-3.0)
res = model.print_summary()
self.assertEqual(res[res['Layer'].str.contains(model.embedding_layer_name_prefix)].shape[0], 4)
# test embedding layer
embedding_layer = Dense(n=10)
model1 = EmbeddingModel.build_embedding_model(branch, model_table='test',
embedding_model_type='quartet', margin=-3.0,
embedding_layer=embedding_layer)
res1 = model1.print_summary()
# print(res1)
self.assertEqual(res1[res1['Layer'].str.contains(model1.embedding_layer_name_prefix)].shape[0], 4)
# test passing in a sequential model
model2 = EmbeddingModel.build_embedding_model(resnet18_model,
embedding_model_type='quartet', margin=-3.0,
embedding_layer=embedding_layer)
res2 = model2.print_summary()
self.assertEqual(res2[res2['Layer'].str.contains(model2.embedding_layer_name_prefix)].shape[0], 4)
def test_embedding_model_quartet_1(self):
if self.server_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR_SERVER is not set in the environment variables")
# test passing in a functional model
vgg16 = Model(self.s)
vgg16.load(path=self.data_dir + 'vgg16.sashdat')
embedding_layer = Dense(n=10)
model = EmbeddingModel.build_embedding_model(vgg16,
embedding_model_type='quartet', margin=-3.0,
embedding_layer=embedding_layer)
res = model.print_summary()
self.assertEqual(res[res['Layer'].str.contains(model.embedding_layer_name_prefix)].shape[0], 4)
# test fit with the data option
def test_siamese_fit(self):
if self.server_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR_SERVER is not set in the environment variables")
# test using one data table
resnet18_model = ResNet18_Caffe(self.s,
width=224,
height=224,
random_crop='RESIZETHENCROP',
random_flip='HV',
random_mutation='random'
)
embedding_layer = Dense(n=4)
model1 = EmbeddingModel.build_embedding_model(resnet18_model, model_table='test1',
embedding_model_type='siamese', margin=3.0,
embedding_layer=embedding_layer)
res1 = model1.print_summary()
self.assertEqual(res1[res1['Layer'].str.contains(model1.embedding_layer_name_prefix)].shape[0], 2)
img_path = self.server_dir + 'DogBreed_small'
my_images = ImageEmbeddingTable.load_files(self.s, path=img_path, n_samples=64, embedding_model_type='siamese')
solver = AdamSolver(lr_scheduler=StepLR(learning_rate=0.0001, step_size=20), clip_grad_max=100,
clip_grad_min=-100)
optimizer = Optimizer(algorithm=solver, mini_batch_size=8, log_level=3, max_epochs=5, reg_l2=0.0001)
gpu = Gpu(devices=1)
train_res = model1.fit_embedding_model(data=my_images, optimizer=optimizer, n_threads=1, gpu=gpu, seed=1234,
record_seed=23435)
print(train_res)
score_res = model1.predict(data=my_images, gpu=gpu, random_crop='RESIZETHENCROP')
print(score_res)
# test deploy as astore
self.s.loadactionset('astore')
my_images_test = ImageEmbeddingTable.load_files(self.s, path=img_path, n_samples=5,
embedding_model_type='siamese',
resize_width=224, resize_height=224)
# case 1: deploy the full model as astore
model1.deploy_embedding_model(output_format='astore', model_type='full',
path=self.local_dir)
full_astore = os.path.join(self.local_dir, model1.model_name + '.astore')
with open(full_astore, mode='rb') as file:
file_content = file.read()
store_ = swat.blob(file_content)
self.s.astore.upload(rstore=dict(name='test1_full', replace=True), store=store_)
# run with one gpu
res2 = self.s.score(rstore=dict(name='test1_full'),
table=my_images_test,
nthreads=1,
# _debug=dict(ranks=0),
copyvars=['_path_', '_path_xx'],
options=[dict(name='usegpu', value='1'),
dict(name='NDEVICES', value='1'),
dict(name='DEVICE0', value='0')
],
out=dict(name='astore_score1_full_gpu', replace=True))
print(res2)
res = self.s.fetch(table='astore_score1_full_gpu')
print(res)
# remove the astore file
os.remove(full_astore)
# case 2: deploy the branch model as astore
model1.deploy_embedding_model(output_format='astore', model_type='branch',
path=self.local_dir)
br_astore = os.path.join(self.local_dir, model1.model_name + '_branch.astore')
with open(br_astore, mode='rb') as file:
file_content = file.read()
store_ = swat.blob(file_content)
self.s.astore.upload(rstore=dict(name='test1_br', replace=True), store=store_)
# run with one gpu
self.s.score(rstore=dict(name='test1_br'),
table=my_images_test,
nthreads=1,
# _debug=dict(ranks=0),
copyvars=['_path_'],
options=[dict(name='usegpu', value='1'),
dict(name='NDEVICES', value='1'),
dict(name='DEVICE0', value='0')
],
out=dict(name='astore_score1_br_gpu', replace=True))
res = self.s.fetch(table='astore_score1_br_gpu')
print(res)
os.remove(br_astore)
# test fit with the path option
def test_siamese_fit_1(self):
if self.server_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR_SERVER is not set in the environment variables")
# test using one data table
resnet18_model = ResNet18_Caffe(self.s,
width=224,
height=224,
random_crop='RESIZETHENCROP',
random_flip='HV',
random_mutation='random'
)
embedding_layer = Dense(n=4)
model1 = EmbeddingModel.build_embedding_model(resnet18_model, model_table='test1',
embedding_model_type='siamese', margin=3.0,
embedding_layer=embedding_layer)
res1 = model1.print_summary()
self.assertEqual(res1[res1['Layer'].str.contains(model1.embedding_layer_name_prefix)].shape[0], 2)
img_path = self.server_dir + 'DogBreed_small'
my_images = ImageEmbeddingTable.load_files(self.s, path=img_path, n_samples=64, embedding_model_type='siamese')
solver = AdamSolver(lr_scheduler=StepLR(learning_rate=0.0001, step_size=20), clip_grad_max=100,
clip_grad_min=-100)
optimizer = Optimizer(algorithm=solver, mini_batch_size=8, log_level=3, max_epochs=2, reg_l2=0.0001)
gpu = Gpu(devices=1)
train_res = model1.fit_embedding_model(optimizer=optimizer, n_threads=1, gpu=gpu,
path=img_path, n_samples=64, max_iter=2,
seed=1234, record_seed=23435)
print(train_res)
score_res = model1.predict(data=my_images, gpu=gpu, random_crop='RESIZETHENCROP')
print(score_res)
# test fit with the data option
def test_triplet_fit(self):
if self.server_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR_SERVER is not set in the environment variables")
# test using one data table
resnet18_model = ResNet18_Caffe(self.s,
width=224,
height=224,
random_crop='RESIZETHENCROP',
random_flip='HV',
random_mutation='random'
)
embedding_layer = Dense(n=4)
model1 = EmbeddingModel.build_embedding_model(resnet18_model, model_table='test1',
embedding_model_type='triplet', margin=-3.0,
embedding_layer=embedding_layer)
res1 = model1.print_summary()
print(res1)
img_path = self.server_dir + 'DogBreed_small'
my_images = ImageEmbeddingTable.load_files(self.s, path=img_path, n_samples=64, embedding_model_type='triplet')
solver = AdamSolver(lr_scheduler=StepLR(learning_rate=0.0001, step_size=20), clip_grad_max=100,
clip_grad_min=-100)
optimizer = Optimizer(algorithm=solver, mini_batch_size=8, log_level=3, max_epochs=5, reg_l2=0.0001)
gpu = Gpu(devices=1)
train_res = model1.fit_embedding_model(data=my_images, optimizer=optimizer, n_threads=1, gpu=gpu, seed=1234,
record_seed=23435)
print(train_res)
score_res = model1.predict(data=my_images, gpu=gpu, random_crop='RESIZETHENCROP')
print(score_res)
# test deploy as astore
self.s.loadactionset('astore')
my_images_test = ImageEmbeddingTable.load_files(self.s, path=img_path, n_samples=5,
embedding_model_type='triplet',
resize_width=224, resize_height=224)
# case 1: deploy the full model as astore
model1.deploy_embedding_model(output_format='astore', model_type='full',
path=self.local_dir)
full_astore = os.path.join(self.local_dir, model1.model_name + '.astore')
with open(full_astore, mode='rb') as file:
file_content = file.read()
store_ = swat.blob(file_content)
self.s.astore.upload(rstore=dict(name='test1_full', replace=True), store=store_)
# run with one gpu
self.s.score(rstore=dict(name='test1_full'),
table=my_images_test,
nthreads=1,
# _debug=dict(ranks=0),
copyvars=['_path_', '_path_1', '_path_2'],
options=[dict(name='usegpu', value='1'),
dict(name='NDEVICES', value='1'),
dict(name='DEVICE0', value='0')
],
out=dict(name='astore_score1_full_gpu', replace=True))
res = self.s.fetch(table='astore_score1_full_gpu')
print(res)
# remove the astore file
os.remove(full_astore)
# case 2: deploy the branch model as astore
model1.deploy_embedding_model(output_format='astore', model_type='branch',
path=self.local_dir)
br_astore = os.path.join(self.local_dir, model1.model_name + '_branch.astore')
with open(br_astore, mode='rb') as file:
file_content = file.read()
store_ = swat.blob(file_content)
self.s.astore.upload(rstore=dict(name='test1_br', replace=True), store=store_)
# run with one gpu
self.s.score(rstore=dict(name='test1_br'),
table=my_images_test,
nthreads=1,
# _debug=dict(ranks=0),
copyvars=['_path_'],
options=[dict(name='usegpu', value='1'),
dict(name='NDEVICES', value='1'),
dict(name='DEVICE0', value='0')
],
out=dict(name='astore_score1_br_gpu', replace=True))
res = self.s.fetch(table='astore_score1_br_gpu')
print(res)
os.remove(br_astore)
# test fit with the data option
def test_quartet_fit(self):
if self.server_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR_SERVER is not set in the environment variables")
# test using one data table
resnet18_model = ResNet18_Caffe(self.s,
width=224,
height=224,
random_crop='RESIZETHENCROP',
random_flip='HV',
random_mutation='random'
)
embedding_layer = Dense(n=4)
model1 = EmbeddingModel.build_embedding_model(resnet18_model, model_table='test1',
embedding_model_type='quartet', margin=-3.0,
embedding_layer=embedding_layer)
res1 = model1.print_summary()
print(res1)
img_path = self.server_dir + 'DogBreed_small'
my_images = ImageEmbeddingTable.load_files(self.s, path=img_path, n_samples=64, embedding_model_type='quartet')
solver = AdamSolver(lr_scheduler=StepLR(learning_rate=0.0001, step_size=20), clip_grad_max=100,
clip_grad_min=-100)
optimizer = Optimizer(algorithm=solver, mini_batch_size=8, log_level=3, max_epochs=5, reg_l2=0.0001)
gpu = Gpu(devices=1)
train_res = model1.fit_embedding_model(data=my_images, optimizer=optimizer, n_threads=1, gpu=gpu, seed=1234,
record_seed=23435)
print(train_res)
score_res = model1.predict(data=my_images, gpu=gpu, random_crop='RESIZETHENCROP')
print(score_res)
# test deploy as astore
self.s.loadactionset('astore')
my_images_test = ImageEmbeddingTable.load_files(self.s, path=img_path, n_samples=5,
embedding_model_type='quartet',
resize_width=224, resize_height=224)
# case 1: deploy the full model as astore
model1.deploy_embedding_model(output_format='astore', model_type='full',
path=self.local_dir)
full_astore = os.path.join(self.local_dir, model1.model_name + '.astore')
with open(full_astore, mode='rb') as file:
file_content = file.read()
store_ = swat.blob(file_content)
self.s.astore.upload(rstore=dict(name='test1_full', replace=True), store=store_)
# run with one gpu
self.s.score(rstore=dict(name='test1_full'),
table=my_images_test,
nthreads=1,
# _debug=dict(ranks=0),
copyvars=['_path_', '_path_1', '_path_2', '_path_3'],
options=[dict(name='usegpu', value='1'),
dict(name='NDEVICES', value='1'),
dict(name='DEVICE0', value='0')
],
out=dict(name='astore_score1_full_gpu', replace=True))
res = self.s.fetch(table='astore_score1_full_gpu')
print(res)
# remove the astore file
os.remove(full_astore)
# case 2: deploy the branch model as astore
model1.deploy_embedding_model(output_format='astore', model_type='branch',
path=self.local_dir)
br_astore = os.path.join(self.local_dir, model1.model_name + '_branch.astore')
with open(br_astore, mode='rb') as file:
file_content = file.read()
store_ = swat.blob(file_content)
self.s.astore.upload(rstore=dict(name='test1_br', replace=True), store=store_)
# run with one gpu
self.s.score(rstore=dict(name='test1_br'),
table=my_images_test,
nthreads=1,
# _debug=dict(ranks=0),
copyvars=['_path_'],
options=[dict(name='usegpu', value='1'),
dict(name='NDEVICES', value='1'),
dict(name='DEVICE0', value='0')
],
out=dict(name='astore_score1_br_gpu', replace=True))
res = self.s.fetch(table='astore_score1_br_gpu')
print(res)
os.remove(br_astore)
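# Running this suite is a sketch that assumes a reachable CAS server plus the
# DLPY_DATA_DIR / DLPY_DATA_DIR_LOCAL / DLPY_DATA_DIR_SERVER environment variables,
# e.g. (module path illustrative):  python -m unittest dlpy.tests.test_embedding_model -v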
``` |